From 8d050b752c48aa2b9cfa9bcca306d2b5102697a8 Mon Sep 17 00:00:00 2001
From: aaron yang
Date: Mon, 30 Oct 2023 23:47:31 +0800
Subject: [PATCH] Deployed a47b96e to 2.0.0a76 with MkDocs 1.4.2 and mike 1.1.2

---
 2.0.0.a76/api/plotting/metrics/index.html | 6 ++++--
 2.0.0.a76/search/search_index.json        | 2 +-
 2.0.0.a76/sitemap.xml.gz                  | Bin 210 -> 210 bytes
 3 files changed, 5 insertions(+), 3 deletions(-)

diff --git a/2.0.0.a76/api/plotting/metrics/index.html b/2.0.0.a76/api/plotting/metrics/index.html
index b47af71..fb2e658 100644
--- a/2.0.0.a76/api/plotting/metrics/index.html
+++ b/2.0.0.a76/api/plotting/metrics/index.html
@@ -816,7 +816,8 @@

     if indicator is not None:
         self.indicator = indicator.join(
-            pd.Series(index=self.frames, name="frames"), how="right"
+            pd.Series(index=self.frames, name="frames", dtype=np.float64),
+            how="right",
         )
     else:
         self.indicator = None
@@ -1182,7 +1183,8 @@

     if indicator is not None:
         self.indicator = indicator.join(
-            pd.Series(index=self.frames, name="frames"), how="right"
+            pd.Series(index=self.frames, name="frames", dtype=np.float64),
+            how="right",
         )
     else:
         self.indicator = None
diff --git a/2.0.0.a76/search/search_index.json b/2.0.0.a76/search/search_index.json
index a93dcec..9b112c1 100644
--- a/2.0.0.a76/search/search_index.json
+++ b/2.0.0.a76/search/search_index.json
@@ -1 +1 @@
[single-line minified search_index.json omitted: this is the machine-generated MkDocs search index, which duplicates the full text of the documentation pages (overview, developer guide, version history, installation, usage tutorial) as escaped JSON; the hunk swaps the old index line for the regenerated one]
MIN30 ) [ 202001131000 , 202001131030 , 202001131100 , 202001131130 , 202001131330 ] Important \u4e0a\u9762\u7684\u793a\u4f8b\u4e2d\uff0c\u51fa\u73b0\u4e86\u53ef\u80fd\u60a8\u4e0d\u592a\u719f\u6089\u7684 naive \u5c5e\u6027\u3002\u5b83\u6307\u7684\u662f\u53d6\u4e0d\u5e26\u65f6\u533a\u7684\u65f6\u95f4\u3002\u5728 python \u4e2d\uff0c\u65f6\u95f4\u53ef\u4ee5\u5e26\u65f6\u533a\uff08timezone-aware) \u548c\u4e0d\u5e26\u65f6\u533a (naive)\u3002 \u5982\u679c\u60a8\u4f7f\u7528 datetime.datetime(2022, 5, 20)\uff0c\u5b83\u5c31\u662f\u4e0d\u5e26\u65f6\u533a\u7684\uff0c\u9664\u975e\u60a8\u4e13\u95e8\u6307\u5b9a\u65f6\u533a\u3002 \u5728 omicron \u4e2d\uff0c\u6211\u4eec\u5728\u7edd\u5927\u591a\u6570\u60c5\u51b5\u4e0b\uff0c\u4ec5\u4f7f\u7528 naive \u8868\u793a\u7684\u65f6\u95f4\uff0c\u5373\u4e0d\u5e26\u65f6\u533a\uff0c\u5e76\u4e14\u5047\u5b9a\u65f6\u533a\u4e3a\u4e1c\u516b\u533a\uff08\u5373\u5317\u4eac\u65f6\u95f4\uff09\u3002 \u5982\u679c\u60a8\u53ea\u77e5\u9053\u7ed3\u675f\u65f6\u95f4\uff0c\u9700\u8981\u5411\u524d\u53d6 n \u4e2a\u65f6\u95f4\u5e27\uff0c\u5219\u53ef\u4ee5\u4f7f\u7528 get_frames_by_count \u3002 \u5982\u679c\u60a8\u53ea\u662f\u9700\u8981\u77e5\u9053\u5728 start \u548c end \u4e4b\u95f4\uff0c\u603b\u5171\u6709\u591a\u5c11\u4e2a\u5e27\uff0c\u8bf7\u4f7f\u7528 count_frames : 1 2 3 start = datetime . date ( 2019 , 12 , 21 ) end = datetime . date ( 2019 , 12 , 21 ) tf . count_frames ( start , end , FrameType . DAY ) \u8f93\u51fa\u5c06\u662f 1\u3002\u4e0a\u8ff0\u65b9\u6cd5\u8fd8\u6709\u4e00\u4e2a\u5feb\u6377\u65b9\u6cd5\uff0c\u5373 count_day_frames \uff0c\u5e76\u4e14\uff0c\u5bf9 week, month, quaters \u4e5f\u662f\u4e00\u6837\u3002","title":"2.2.4. \u5217\u51fa\u533a\u95f4\u5185\u7684\u6240\u6709\u65f6\u95f4\u5e27"},{"location":"usage/#23-\u8bfb\u53d6\u884c\u60c5\u6570\u636e","text":"\u73b0\u5728\uff0c\u8ba9\u6211\u4eec\u6765\u83b7\u53d6\u4e00\u6bb5\u884c\u60c5\u6570\u636e\uff1a 1 2 3 4 code = \"000001.XSHE\" end = datetime . date ( 2022 , 5 , 20 ) bars = await Stock . get_bars ( code , 10 , FrameType . 
DAY , end ) \u8fd4\u56de\u7684 bars \u5c06\u662f\u4e00\u4e2a numpy structured array, \u5176\u7c7b\u578b\u4e3a bars_dtype \u3002\u4e00\u822c\u5730\uff0c\u5b83\u5305\u62ec\u4e86\u4ee5\u4e0b\u5b57\u6bb5\uff1a 1 2 3 4 5 6 7 8 * frame\uff08\u5e27\uff09 * open\uff08\u5f00\u76d8\u4ef7\uff09 * high\uff08\u6700\u9ad8\u4ef7\uff09 * low\uff08\u6700\u4f4e\u4ef7\uff09 * close\uff08\u6536\u76d8\u4ef7\uff09 * volume\uff08\u6210\u4ea4\u91cf\uff0c\u80a1\u6570\uff09 * amount\uff08\u6210\u4ea4\u989d\uff09 * factor\uff08\u590d\u6743\u56e0\u5b50\uff09 \u7f3a\u7701\u60c5\u51b5\u4e0b\uff0c\u8fd4\u56de\u7684\u6570\u636e\u662f\u5230 end \u4e3a\u6b62\u7684\u524d\u590d\u6743\u6570\u636e\u3002\u4f60\u53ef\u4ee5\u901a\u53c2\u6570 fq = False \u5173\u95ed\u5b83\uff0c\u6765\u83b7\u5f97\u4e0d\u590d\u6743\u6570\u636e\uff0c\u5e76\u4ee5\u6b64\u81ea\u884c\u8ba1\u7b97\u540e\u590d\u6743\u6570\u636e\u3002 \u5982\u679c\u8981\u83b7\u53d6\u67d0\u4e2a\u65f6\u95f4\u6bb5\u7684\u6570\u636e\uff0c\u53ef\u4ee5\u4f7f\u7528 get_bars_in_range \u3002 \u4e0a\u8ff0\u65b9\u6cd5\u603b\u662f\u5c3d\u6700\u5927\u53ef\u80fd\u8fd4\u56de\u5b9e\u65f6\u6570\u636e\uff0c\u5982\u679c end \u4e3a\u5f53\u524d\u65f6\u95f4\u7684\u8bdd\uff0c\u4f46\u7531\u4e8e omega \u540c\u6b65\u5ef6\u65f6\u662f\u4e00\u5206\u949f\uff0c\u6240\u4ee5\u884c\u60c5\u6570\u636e\u6700\u591a\u53ef\u80fd\u6162\u4e00\u5206\u949f\u3002\u5982\u679c\u8981\u83b7\u53d6\u66f4\u5b9e\u65f6\u7684\u6570\u636e\uff0c\u53ef\u4ee5\u901a\u8fc7 get_latest_price \u65b9\u6cd5\u3002 \u8981\u83b7\u6da8\u8dcc\u505c\u4ef7\u683c\u548c\u6807\u5fd7\uff0c\u8bf7\u4f7f\u7528: get_trade_price_limits trade_price_limits_flags trade_price_limit_flags_ex","title":"2.3. \u8bfb\u53d6\u884c\u60c5\u6570\u636e"},{"location":"usage/#24-\u677f\u5757\u6570\u636e","text":"\u63d0\u4f9b\u540c\u82b1\u987a\u677f\u5757\u884c\u4e1a\u677f\u5757\u548c\u6982\u5ff5\u677f\u5757\u6570\u636e\u3002\u5728\u4f7f\u7528\u672c\u6a21\u5757\u4e4b\u524d\uff0c\u9700\u8981\u8fdb\u884c\u521d\u59cb\u5316\uff1a 1 2 3 # \u8bf7\u5148\u8fdb\u884comicron\u521d\u59cb\u5316\uff0c\u7565 from omicron.models.board import Board , BoardType Board . init ( '192.168.100.101' ) \u6b64\u5904\u7684IP\u4e3a\u5b89\u88c5omega\u670d\u52a1\u5668\u7684ip\u3002 \u901a\u8fc7 board_list \u6765\u67e5\u8be2\u6240\u6709\u7684\u677f\u5757\u3002 \u5176\u5b83\u65b9\u6cd5\u8bf7\u53c2\u770b API\u6587\u6863","title":"2.4. 
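Section 2.3 mentions `get_bars_in_range` and `get_latest_price` without showing them. A sketch under assumed signatures — `(code, frame_type, start, end)` and a list of codes respectively — with an assumed import path; check the API reference for the exact parameters:

```python
import datetime

from coretypes import FrameType
from omicron.models.stock import Stock  # assumed import path

code = "000001.XSHE"
start = datetime.date(2022, 5, 9)
end = datetime.date(2022, 5, 20)

# Bars between two frames; assumed signature (code, frame_type, start, end).
bars = await Stock.get_bars_in_range(code, FrameType.DAY, start, end)

# Fresher quotes than get_bars; assumed to take a list of codes.
prices = await Stock.get_latest_price([code])
```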
\u677f\u5757\u6570\u636e"},{"location":"usage/#3-\u7b56\u7565\u7f16\u5199","text":"omicron \u901a\u8fc7 strategy \u6765\u63d0\u4f9b\u7b56\u7565\u6846\u67b6\u3002\u901a\u8fc7\u8be5\u6846\u67b6\u7f16\u5199\u7684\u7b56\u7565\uff0c\u53ef\u4ee5\u5728\u5b9e\u76d8\u548c\u56de\u6d4b\u4e4b\u95f4\u65e0\u7f1d\u8f6c\u6362 -- \u6839\u636e\u521d\u59cb\u5316\u65f6\u4f20\u5165\u7684\u670d\u52a1\u5668\u4e0d\u540c\u800c\u81ea\u52a8\u5207\u6362\u3002 omicron \u63d0\u4f9b\u4e86\u4e00\u4e2a\u7b80\u5355\u7684 \u53cc\u5747\u7ebf\u7b56\u7565 \u4f5c\u4e3a\u7b56\u7565\u7f16\u5199\u7684\u793a\u8303\uff0c\u53ef\u7ed3\u5408\u5176\u6e90\u7801\uff0c\u4ee5\u53ca\u672c\u6587\u6863\u4e2d\u7684 \u5b8c\u6574\u7b56\u7565\u793a\u4f8b \u5728notebook\u4e2d\u8fd0\u884c\u67e5\u770b\u3002 \u7b56\u7565\u6846\u67b6\u63d0\u4f9b\u4e86\u56de\u6d4b\u9a71\u52a8\u903b\u8f91\u53ca\u4e00\u4e9b\u57fa\u672c\u51fd\u6570\u3002\u8981\u7f16\u5199\u81ea\u5df1\u7684\u7b56\u7565\uff0c\u60a8\u9700\u8981\u4ece\u57fa\u7c7b BaseStrategy \u6d3e\u751f\u51fa\u81ea\u5df1\u7684\u5b50\u7c7b\uff0c\u5e76\u6539\u5199\u5b83\u7684 predict \u65b9\u6cd5\u6765\u5b9e\u73b0\u8c03\u4ed3\u6362\u80a1\u3002 \u7b56\u7565\u6846\u67b6\u4f9d\u8d56\u4e8e zillionare-trader-client \uff0c\u5728\u56de\u6d4b\u65f6\uff0c\u9700\u8981\u6709 zillionare-backtesting \u63d0\u4f9b\u56de\u6d4b\u670d\u52a1\u3002\u5728\u5b9e\u76d8\u65f6\uff0c\u9700\u8981 zilllionare-gm-adaptor \u6216\u8005\u5176\u5b83\u5b9e\u76d8\u4ea4\u6613\u7f51\u5173\u63d0\u4f9b\u670d\u52a1\u3002 \u7b56\u7565\u4ee3\u7801\u53ef\u4ee5\u4e0d\u52a0\u4fee\u6539\uff0c\u5373\u53ef\u4f7f\u7528\u4e8e\u56de\u6d4b\u548c\u5b9e\u76d8\u4e24\u79cd\u573a\u666f\u3002","title":"3. \u7b56\u7565\u7f16\u5199"},{"location":"usage/#31-\u56de\u6d4b\u573a\u666f","text":"\u5b9e\u73b0\u7b56\u7565\u56de\u6d4b\uff0c\u4e00\u822c\u9700\u8981\u8fdb\u884c\u4ee5\u4e0b\u6b65\u9aa4\uff1a 1. \u4ece\u6b64\u57fa\u7c7b\u6d3e\u751f\u51fa\u4e00\u4e2a\u7b56\u7565\u5b50\u7c7b\uff0c\u6bd4\u5982sma.py 2. \u5b50\u7c7b\u9700\u8981\u91cd\u8f7d predict \u65b9\u6cd5\uff0c\u6839\u636e\u5f53\u524d\u4f20\u5165\u7684\u65f6\u95f4\u5e27\u548c\u5e27\u7c7b\u578b\u53c2\u6570\uff0c\u83b7\u53d6\u6570\u636e\u5e76\u8fdb\u884c\u5904\u7406\uff0c\u8bc4\u4f30\u51fa\u4ea4\u6613\u4fe1\u53f7\u3002 3. \u5b50\u7c7b\u6839\u636e\u4ea4\u6613\u4fe1\u53f7\uff0c\u5728 predict \u65b9\u6cd5\u91cc\uff0c\u8c03\u7528\u57fa\u7c7b\u7684 buy \u548c sell \u65b9\u6cd5\u6765\u8fdb\u884c\u4ea4\u6613 4. \u751f\u6210\u7b56\u7565\u5b9e\u4f8b\uff0c\u901a\u8fc7\u5b9e\u4f8b\u8c03\u7528 backtest \u65b9\u6cd5\u6765\u8fdb\u884c\u56de\u6d4b\uff0c\u8be5\u65b9\u6cd5\u5c06\u6839\u636e\u7b56\u7565\u6784\u5efa\u65f6\u6307\u5b9a\u7684\u56de\u6d4b\u8d77\u59cb\u65f6\u95f4\u3001\u7ec8\u6b62\u65f6\u95f4\u3001\u5e27\u7c7b\u578b\uff0c\u9010\u5e27\u751f\u6210\u5404\u4e2a\u65f6\u95f4\u5e27\uff0c\u5e76\u8c03\u7528\u5b50\u7c7b\u7684 predict \u65b9\u6cd5\u3002\u5982\u679c\u8c03\u7528\u65f6\u6307\u5b9a\u4e86 portfolio \u548c min_bars \u53c2\u6570\uff0c backtest \u8fd8\u5c06\u8fdb\u884c\u6570\u636e\u9884\u53d6\uff0c\u5e76\u5c06\u622a\u6b62\u5230\u5f53\u524d\u56de\u6d4b\u5e27\u65f6\u7684\u6570\u636e\u4f20\u5165\u3002 4. 
\u5728\u4ea4\u6613\u7ed3\u675f\u65f6\uff0c\u8c03\u7528 plot_metrics \u65b9\u6cd5\u6765\u83b7\u53d6\u5982\u4e0b\u6240\u793a\u7684\u56de\u6d4b\u6307\u6807\u56fe \u5982\u4f55\u6d3e\u751f\u5b50\u7c7b\uff0c\u53ef\u4ee5\u53c2\u8003 sma \u6e90\u4ee3\u7801\u3002 1 2 3 4 5 6 7 8 9 10 from omicron.strategy.sma import SMAStrategy sma = SMAStrategy ( url = \"\" , # the url of either backtest server, or trade server is_backtest = True , start = datetime . date ( 2023 , 2 , 3 ), end = datetime . date ( 2023 , 4 , 28 ), frame_type = FrameType . DAY , ) await sma . backtest ( portfolio = [ \"600000.XSHG\" , min_bars = 20 ]) \u5728\u56de\u6d4b\u65f6\uff0c\u5fc5\u987b\u8981\u6307\u5b9a is_backtest=True \u548c start , end \u53c2\u6570\u3002","title":"3.1. \u56de\u6d4b\u573a\u666f"},{"location":"usage/#32-\u56de\u6d4b\u62a5\u544a","text":"\u5728\u56de\u6d4b\u7ed3\u675f\u540e\uff0c\u53ef\u4ee5\u901a\u8fc7\u4ee5\u4e0b\u65b9\u6cd5\uff0c\u5728notebook\u4e2d\u7ed8\u5236\u56de\u6d4b\u62a5\u544a\uff1a 1 await sma . plot_metrics () \u8fd9\u5c06\u7ed8\u5236\u51fa\u7c7b\u4f3c\u4ee5\u4e0b\u56fe\uff1a","title":"3.2. \u56de\u6d4b\u62a5\u544a"},{"location":"usage/#321-\u5728\u56de\u6d4b\u62a5\u544a\u4e2d\u6dfb\u52a0\u6280\u672f\u6307\u6807","text":"Info Since 2.0.0.a76 \u9996\u5148\uff0c\u6211\u4eec\u53ef\u4ee5\u5728\u7b56\u7565\u7c7b\u7684predict\u65b9\u6cd5\u4e2d\u8ba1\u7b97\u51fa\u6280\u672f\u6307\u6807\uff0c\u5e76\u4fdd\u5b58\u5230\u6210\u5458\u53d8\u91cf\u4e2d\u3002\u5728\u4e0b\u9762\u7684\u793a\u4f8b\u4ee3\u7801\u4e2d\uff0c\u6211\u4eec\u5c06\u6280\u672f\u6307\u6807\u53ca\u5f53\u65f6\u7684\u65f6\u95f4\u4fdd\u5b58\u5230\u4e86\u4e00\u4e2aindicators\u6570\u7ec4\u4e2d\uff08\u6ce8\u610f\u987a\u5e8f\uff01\uff09\uff0c\u7136\u540e\u5728\u56de\u6d4b\u7ed3\u675f\u540e\uff0c\u5728\u8c03\u7528 plot_metrics\u65f6\uff0c\u5c06\u5176\u4f20\u5165\u5373\u53ef\u3002 1 2 3 4 5 6 7 indicators = [ (datetime.date(2021, 2, 3), 20.1), (datetime.date(2021, 2, 4), 20.2), ..., (datetime.date(2021, 4, 1), 20.3) ] await sma.plot_metrics(indicator) \u65f6\u95f4\u53ea\u80fd\u4f7f\u7528\u4e3b\u5468\u671f\u7684\u65f6\u95f4\uff0c\u5426\u5219\u53ef\u80fd\u4ea7\u751f\u65e0\u6cd5\u4e0e\u5750\u6807\u8f74\u5bf9\u9f50\u7684\u60c5\u51b5\u3002 \u52a0\u5165\u7684\u6307\u6807\u9ed8\u8ba4\u53ea\u663e\u793a\u5728legend\u4e2d\uff0c\u5982\u679c\u8981\u663e\u793a\u5728\u4e3b\u56fe\u4e0a\uff0c\u9700\u8981\u70b9\u51fblegend\u8fdb\u884c\u663e\u793a\u3002 \u6307\u6807\u9664\u53ef\u4ee5\u53e0\u52a0\u5728\u4e3b\u56fe\u4e0a\u4e4b\u5916\uff0c\u8fd8\u4f1a\u51fa\u73b0\u5728\u57fa\u51c6\u7ebf\u7684hoverinfo\u4e2d\uff08\u5373\u4f7f\u6307\u6807\u7684\u8ba1\u7b97\u4e0e\u57fa\u51c6\u7ebf\u65e0\u5173\uff09\uff0c\u53c2\u89c1\u4e0a\u56fe\u4e2d\u7684\u201c\u6307\u6807\u201d\u884c\u3002","title":"3.2.1. 
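Two details in the snippets above are easy to trip over: `min_bars` is a keyword argument of `backtest` itself, not an element of the `portfolio` list, and the variable handed to `plot_metrics` should be the `indicators` list that was built (the complete example in 3.4 below passes `sma.indicators` the same way). Continuing with the `sma` instance constructed above:

```python
import datetime

# portfolio is a list of codes; min_bars is a separate keyword argument.
await sma.backtest(portfolio=["600000.XSHG"], min_bars=20)

# Indicator timestamps must use the strategy's main frame type.
indicators = [
    (datetime.date(2021, 2, 3), 20.1),
    (datetime.date(2021, 2, 4), 20.2),
    (datetime.date(2021, 4, 1), 20.3),
]
await sma.plot_metrics(indicators)
```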
\u5728\u56de\u6d4b\u62a5\u544a\u4e2d\u6dfb\u52a0\u6280\u672f\u6307\u6807"},{"location":"usage/#33-\u4f7f\u7528\u6570\u636e\u9884\u53d6","text":"Info since version 2.0.0-alpha76 \u5728\u56de\u6d4b\u4e2d\uff0c\u53ef\u4ee5\u4f7f\u7528\u4e3b\u5468\u671f\u7684\u6570\u636e\u9884\u53d6\uff0c\u4ee5\u52a0\u5feb\u56de\u6d4b\u901f\u5ea6\u3002\u5de5\u4f5c\u539f\u7406\u5982\u4e0b\uff1a \u5982\u679c\u7b56\u7565\u5728\u8c03\u7528 backtest \u65f6\u4f20\u5165\u4e86 portfolio \u53ca min_bars \u53c2\u6570\uff0c\u5219 backtest \u5c06\u4f1a\u5728\u56de\u6d4b\u4e4b\u524d\uff0c\u9884\u53d6\u4ece[start - min_bars * frame_type, end]\u95f4\u7684portfolio\u884c\u60c5\u6570\u636e\uff0c\u5e76\u5728\u6bcf\u6b21\u8c03\u7528 predict \u65b9\u6cd5\u65f6\uff0c\u901a\u8fc7 barss \u53c2\u6570\uff0c\u5c06[start - min_bars * frame_type, start + i * frame_type]\u95f4\u7684\u6570\u636e\u4f20\u7ed9 predict \u65b9\u6cd5\u3002\u4f20\u5165\u7684\u6570\u636e\u5df2\u8fdb\u884c\u524d\u590d\u6743\u3002 \u5982\u679c\u5728\u56de\u6d4b\u8fc7\u7a0b\u4e2d\uff0c\u9700\u8981\u5077\u770b\u672a\u6765\u6570\u636e\uff0c\u53ef\u4ee5\u4f7f\u7528peek\u65b9\u6cd5\u3002","title":"3.3. \u4f7f\u7528\u6570\u636e\u9884\u53d6"},{"location":"usage/#34-\u5b8c\u6574sma\u56de\u6d4b\u793a\u4f8b","text":"\u4ee5\u4e0b\u7b56\u7565\u9700\u8981\u5728notebook\u4e2d\u8fd0\u884c\uff0c\u5e76\u4e14\u9700\u8981\u4e8b\u5148\u5b89\u88c5omega\u670d\u52a1\u5668\u540c\u6b65\u6570\u636e\uff0c\u5e76\u6b63\u786e\u914d\u7f6eomicron\u3002 \u8be5\u793a\u4f8b\u5728\u300a\u5927\u5bcc\u7fc1\u91cf\u5316\u8bfe\u7a0b\u300b\u8bfe\u4ef6\u73af\u5883\u4e0b\u53ef\u8fd0\u884c\u3002 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 import cfg4py import omicron import datetime from omicron.strategy.sma import SMAStrategy from coretypes import FrameType cfg = cfg4py . init ( \"/etc/zillionare\" ) await omicron . init () sec = \"600000.XSHG\" start = datetime . date ( 2022 , 1 , 4 ) end = datetime . date ( 2023 , 1 , 1 ) sma = SMAStrategy ( sec , url = cfg . backtest . url , is_backtest = True , start = start , end = end , frame_type = FrameType . DAY ) await sma . backtest ( portfolio = [ sec ], min_bars = 10 , stop_on_error = False ) await sma . plot_metrics ( sma . indicators )","title":"3.4. \u5b8c\u6574SMA\u56de\u6d4b\u793a\u4f8b"},{"location":"usage/#35-\u5b9e\u76d8","text":"\u5728\u5b9e\u76d8\u73af\u5883\u4e0b\uff0c\u4f60\u8fd8\u9700\u8981\u5728\u5b50\u7c7b\u4e2d\u52a0\u5165\u5468\u671f\u6027\u4efb\u52a1(\u6bd4\u5982\u6bcf\u5206\u949f\u6267\u884c\u4e00\u6b21\uff09\uff0c\u5728\u8be5\u4efb\u52a1\u4e2d\u8c03\u7528 predict \u65b9\u6cd5\u6765\u5b8c\u6210\u4ea4\u6613\uff0c\u5982\u4ee5\u4e0b\u793a\u4f8b\u6240\u793a\uff1a 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 import cfg4py import omicron import datetime from omicron.strategy.sma import SMAStrategy from coretypes import FrameType from apscheduler.schedulers.asyncio import AsyncIOScheduler cfg = cfg4py . init ( \"/etc/zillionare\" ) await omicron . init () async def daily_job (): sma = SMAStrategy ( sec , url = cfg . traderserver . url , is_backtest = False , frame_type = FrameType . DAY ) bars = await Stock . get_bars ( sma . _sec , 20 , FrameType . DAY ) await sma . predict ( barss = { sma . _sec : bars }) async def main (): scheduler = AsyncIOScheduler () scheduler . add_job ( daily_job , 'cron' , hour = 14 , minute = 55 ) scheduler . 
start () \u7b56\u7565\u4ee3\u7801\u65e0\u987b\u4fee\u6539\u3002 \u8be5\u7b56\u7565\u5c06\u81ea\u52a8\u5728\u6bcf\u5929\u768414\uff1a55\u8fd0\u884c\uff0c\u4ee5\u5224\u65ad\u662f\u5426\u8981\u8fdb\u884c\u8c03\u4ed3\u6362\u80a1\u3002\u60a8\u9700\u8981\u989d\u5916\u5224\u65ad\u5f53\u5929\u662f\u5426\u4e3a\u4ea4\u6613\u65e5\u3002","title":"3.5. \u5b9e\u76d8"},{"location":"usage/#4-\u7ed8\u56fe","text":"omicron \u901a\u8fc7 Candlestick \u63d0\u4f9b\u4e86 k \u7ebf\u7ed8\u5236\u529f\u80fd\u3002\u9ed8\u8ba4\u5730\uff0c\u5b83\u5c06\u7ed8\u5236\u4e00\u5e45\u663e\u793a 120 \u4e2a bar\uff0c\u53ef\u62d6\u52a8\uff08\u4ee5\u52a0\u8f7d\u66f4\u591a bar)\uff0c\u5e76\u4e14\u53ef\u4ee5\u53e0\u52a0\u526f\u56fe\u3001\u4e3b\u56fe\u53e0\u52a0\u5404\u79cd\u6307\u6807\u7684 k \u7ebf\u56fe\uff1a \u4e0a\u56fe\u663e\u793a\u4e86\u81ea\u52a8\u68c0\u6d4b\u51fa\u6765\u7684\u5e73\u53f0\u3002\u6b64\u5916\uff0c\u8fd8\u53ef\u4ee5\u8fdb\u884c\u9876\u5e95\u81ea\u52a8\u68c0\u6d4b\u548c\u6807\u6ce8\u3002 Note \u901a\u8fc7\u6307\u5b9a width \u53c2\u6570\uff0c\u53ef\u4ee5\u5f71\u54cd\u521d\u59cb\u52a0\u8f7d\u7684bar\u7684\u6570\u91cf\u3002 omicron \u901a\u8fc7 metris \u63d0\u4f9b\u56de\u6d4b\u62a5\u544a\u3002\u8be5\u62a5\u544a\u7c7b\u4f3c\u4e8e\uff1a \u5b83\u540c\u6837\u63d0\u4f9b\u53ef\u62d6\u52a8\u7684\u7ed8\u56fe\uff0c\u5e76\u4e14\u5728\u4e70\u5356\u70b9\u4e0a\u53ef\u4ee5\u901a\u8fc7\u9f20\u6807\u60ac\u505c\uff0c\u663e\u793a\u4e70\u5356\u70b9\u4fe1\u606f\u3002 omicron \u7684\u7ed8\u56fe\u529f\u80fd\u53ea\u80fd\u5728 notebook \u4e2d\u4f7f\u7528\u3002","title":"4. \u7ed8\u56fe"},{"location":"usage/#5-\u8bc4\u4f30\u6307\u6807","text":"omicron \u63d0\u4f9b\u4e86 mean_absolute_error \u51fd\u6570\u548c pct_error \u51fd\u6570\u3002\u5b83\u4eec\u5728 scipy \u6216\u8005\u5176\u5b83\u5e93\u4e2d\u4e5f\u80fd\u627e\u5230\uff0c\u4e3a\u4e86\u65b9\u4fbf\u4e0d\u719f\u6089\u8fd9\u4e9b\u7b2c\u4e09\u65b9\u5e93\u7684\u4f7f\u7528\u8005\uff0c\u6211\u4eec\u5185\u7f6e\u4e86\u8fd9\u4e2a\u5e38\u6307\u6807\u3002 \u5bf9\u4e00\u4e9b\u5e38\u89c1\u7684\u7b56\u7565\u8bc4\u4f30\u51fd\u6570\uff0c\u6211\u4eec\u5f15\u7528\u4e86 empyrical \u4e2d\u7684\u76f8\u5173\u51fd\u6570\uff0c\u6bd4\u5982 alpha, beta, shapre_ratio\uff0c calmar_ratio \u7b49\u3002","title":"5. \u8bc4\u4f30\u6307\u6807"},{"location":"usage/#6-talib-\u5e93","text":"\u60a8\u5e94\u8be5\u628a\u8fd9\u91cc\u63d0\u4f9b\u7684\u51fd\u6570\u5f53\u6210\u5b9e\u9a8c\u6027\u7684\u3002\u8fd9\u4e9b API \u4e5f\u53ef\u80fd\u5728\u67d0\u5929\u88ab\u5e9f\u5f03\u3001\u91cd\u547d\u540d\u3001\u4fee\u6539\uff0c\u6216\u8005\u8fd9\u4e9b API \u5e76\u6ca1\u6709\u591a\u5927\u4f5c\u7528\uff0c\u6216\u8005\u5b83\u4eec\u7684\u5b9e\u73b0\u5b58\u5728\u9519\u8bef\u3002 \u4f46\u662f\uff0c\u5982\u679c\u6211\u4eec\u5c06\u6765\u4f1a\u629b\u5f03\u8fd9\u4e9b API \u7684\u8bdd\uff0c\u6211\u4eec\u4e00\u5b9a\u4f1a\u901a\u8fc7 depracted \u65b9\u6cd5\u63d0\u524d\u8fdb\u884c\u8b66\u544a\u3002","title":"6. 
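A minimal plotting sketch for section 4, meant to run in a notebook. The import path and constructor arguments of `Candlestick` are assumptions here (they are not shown above); consult the plotting API reference for the exact signature:

```python
import datetime

from coretypes import FrameType
from omicron.models.stock import Stock    # assumed import path
from omicron.plotting import Candlestick  # assumed import path

end = datetime.date(2022, 5, 20)
bars = await Stock.get_bars("000001.XSHE", 250, FrameType.DAY, end)

# width controls how many bars are shown initially (see the Note in section 4).
cs = Candlestick(bars, width=120)  # assumed constructor parameters
cs.plot()  # assumed to render the interactive chart in a notebook
```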
TALIB \u5e93"},{"location":"usage/#7-\u6269\u5c55","text":"Python\u5f53\u4e2d\u7684\u56db\u820d\u4e94\u5165\u7528\u4e8e\u8bc1\u5238\u6295\u8d44\uff0c\u4f1a\u5e26\u6765\u4e25\u91cd\u7684\u95ee\u9898\uff0c\u6bd4\u5982\uff0c\u50cf round(0.3/2) \uff0c\u6211\u4eec\u671f\u671b\u5f97\u5230 0.2 \uff0c\u4f46\u5b9e\u9645\u4e0a\u4f1a\u5f97\u5230 0.1 \u3002\u8fd9\u79cd\u8bef\u5dee\u4e00\u65e6\u53d1\u751f\u6210\u5728\u4e00\u4e9b\u4f4e\u4ef7\u80a1\u8eab\u4e0a\uff0c\u5c06\u4f1a\u5e26\u6765\u975e\u5e38\u5927\u7684\u4e0d\u786e\u5b9a\u6027\u3002\u6bd4\u5982\uff0c1.945\u4fdd\u7559\u4e24\u4f4d\u5c0f\u6570\uff0c\u672c\u6765\u5e94\u8be5\u662f1.95\uff0c\u5982\u679c\u88ab\u8bef\u820d\u5165\u4e3a1.94\uff0c\u5219\u8bef\u5dee\u63a5\u8fd10.5%\uff0c\u8fd9\u5bf9\u6295\u8d44\u6765\u8bf4\u662f\u96be\u4ee5\u63a5\u53d7\u7684\u3002 Info \u5982\u679c\u4e00\u5929\u53ea\u8fdb\u884c\u4e00\u6b21\u4ea4\u6613\uff0c\u4e00\u6b21\u4ea4\u6613\u8bef\u5dee\u4e3a0.5%\uff0c\u4e00\u5e74\u7d2f\u79ef\u4e0b\u6765\uff0c\u8bef\u5dee\u5c06\u8fbe\u52302.5\u500d\u3002 \u6211\u4eec\u5728 decimals \u4e2d\u63d0\u4f9b\u4e86\u9002\u7528\u4e8e\u8bc1\u5238\u4ea4\u6613\u9886\u57df\u7684\u7248\u672c\uff0c math_round \u548c\u4ef7\u683c\u6bd4\u8f83\u51fd\u6570 price_equal \u3002 \u6211\u4eec\u8fd8\u5728 np \u4e2d\uff0c\u5bf9numpy\u4e2d\u7f3a\u5931\u7684\u4e00\u4e9b\u529f\u80fd\u8fdb\u884c\u4e86\u8865\u5145\uff0c\u6bd4\u5982 numpy_append_fields , fill_nan \u7b49\u3002","title":"7. \u6269\u5c55"},{"location":"api/board/","text":"Board \u00b6 Source code in omicron/models/board.py class Board : server_ip : str server_port : int measurement = \"board_bars_1d\" @classmethod def init ( cls , ip : str , port : int = 3180 ): cls . server_ip = ip cls . server_port = port @classmethod async def _rpc_call ( cls , url : str , param : str ): _url = f \"http:// { cls . server_ip } : { cls . server_port } /api/board/ { url } \" async with httpx . AsyncClient () as client : r = await client . post ( _url , json = param , timeout = 10 ) if r . status_code != 200 : logger . error ( f \"failed to post RPC call, { _url } : { param } , response: { r . content . decode () } \" ) return { \"rc\" : r . status_code } rsp = json . loads ( r . content ) return { \"rc\" : 200 , \"data\" : rsp } @classmethod async def board_list ( cls , _btype : BoardType = BoardType . CONCEPT ) -> List [ List ]: \"\"\"\u83b7\u53d6\u677f\u5757\u5217\u8868 Args: _btype: \u677f\u5757\u7c7b\u522b\uff0c\u53ef\u9009\u503c`BoardType.CONCEPT`\u548c`BoardType.INDUSTRY`. Returns: \u677f\u5757\u5217\u8868\u3002\u6bcf\u4e00\u4e2a\u5b50\u5143\u7d20\u4ecd\u4e3a\u4e00\u4e2a\u5217\u8868\uff0c\u7531\u677f\u5757\u4ee3\u7801(str), \u677f\u5757\u540d\u79f0(str)\u548c\u6210\u5458\u6570\u7ec4\u6210\u3002\u793a\u4f8b\uff1a ``` [ ['881101', '\u79cd\u690d\u4e1a\u4e0e\u6797\u4e1a', 24], ['881102', '\u517b\u6b96\u4e1a', 27], ['881103', '\u519c\u4ea7\u54c1\u52a0\u5de5', 41], ['881104', '\u519c\u4e1a\u670d\u52a1', 16], ] ``` \"\"\" rsp = await cls . _rpc_call ( \"board_list\" , { \"board_type\" : _btype . value }) if rsp [ \"rc\" ] != 200 : return { \"status\" : 500 , \"msg\" : \"httpx RPC call failed\" } return rsp [ \"data\" ] @classmethod async def fuzzy_match_board_name ( cls , pattern : str , _btype : BoardType = BoardType . 
CONCEPT ) -> dict : \"\"\"\u6a21\u7cca\u67e5\u8be2\u677f\u5757\u4ee3\u7801\u7684\u540d\u5b57 Examples: ```python await Board.fuzzy_match_board_name(\"\u6c7d\u8f66\", BoardType.INDUSTRY) # returns: [ '881125 \u6c7d\u8f66\u6574\u8f66', '881126 \u6c7d\u8f66\u96f6\u90e8\u4ef6', '881127 \u975e\u6c7d\u8f66\u4ea4\u8fd0', '881128 \u6c7d\u8f66\u670d\u52a1', '884107 \u6c7d\u8f66\u670d\u52a1\u2162', '884194 \u6c7d\u8f66\u96f6\u90e8\u4ef6\u2162' ] ``` Args: pattern: \u5f85\u67e5\u8be2\u6a21\u5f0f\u4e32 _btype: \u67e5\u8be2\u7c7b\u578b Returns: \u5305\u542b\u4ee5\u4e0bkey\u7684dict: code(\u677f\u5757\u4ee3\u7801), name\uff08\u677f\u5757\u540d\uff09, stocks(\u80a1\u7968\u6570) \"\"\" if not pattern : return [] rsp = await cls . _rpc_call ( \"fuzzy_match_name\" , { \"board_type\" : _btype . value , \"pattern\" : pattern } ) if rsp [ \"rc\" ] != 200 : return { \"status\" : 500 , \"msg\" : \"httpx RPC call failed\" } return rsp [ \"data\" ] @classmethod async def board_info_by_id ( cls , board_id : str , full_mode : bool = False ) -> dict : \"\"\"\u901a\u8fc7\u677f\u5757\u4ee3\u7801\u67e5\u8be2\u677f\u5757\u4fe1\u606f\uff08\u540d\u5b57\uff0c\u6210\u5458\u6570\u76ee\u6216\u6e05\u5355\uff09 Examples: ```python board_code = '881128' # \u6c7d\u8f66\u670d\u52a1 \u53ef\u81ea\u884c\u4fee\u6539 board_info = await Board.board_info_by_id(board_code) print(board_info) # \u5b57\u5178\u5f62\u5f0f # returns {'code': '881128', 'name': '\u6c7d\u8f66\u670d\u52a1', 'stocks': 14} ``` Returns: {'code': '301505', 'name': '\u533b\u7597\u5668\u68b0\u6982\u5ff5', 'stocks': 242} or {'code': '301505', 'name': '\u533b\u7597\u5668\u68b0\u6982\u5ff5', 'stocks': [['300916', '\u6717\u7279\u667a\u80fd'], ['300760', '\u8fc8\u745e\u533b\u7597']]} \"\"\" if not board_id : return {} if board_id [ 0 ] == \"3\" : _btype = BoardType . CONCEPT else : _btype = BoardType . INDUSTRY _mode = 0 if full_mode : # \u8f6c\u6362bool\u7c7b\u578b _mode = 1 rsp = await cls . _rpc_call ( \"info\" , { \"board_type\" : _btype . value , \"board_id\" : board_id , \"fullmode\" : _mode }, ) if rsp [ \"rc\" ] != 200 : return { \"status\" : 500 , \"msg\" : \"httpx RPC call failed\" } return rsp [ \"data\" ] @classmethod async def board_info_by_security ( cls , security : str , _btype : BoardType = BoardType . CONCEPT ) -> List [ dict ]: \"\"\"\u83b7\u53d6\u80a1\u7968\u6240\u5728\u677f\u5757\u4fe1\u606f\uff1a\u540d\u79f0\uff0c\u4ee3\u7801 Examples: ```python stock_code = '002236' # \u5927\u534e\u80a1\u4efd\uff0c\u80a1\u7968\u4ee3\u7801\u4e0d\u5e26\u5b57\u6bcd\u540e\u7f00 stock_in_board = await Board.board_info_by_security(stock_code, _btype=BoardType.CONCEPT) print(stock_in_board) # returns: [ {'code': '301715', 'name': '\u8bc1\u91d1\u6301\u80a1', 'stocks': 208}, {'code': '308870', 'name': '\u6570\u5b57\u7ecf\u6d4e', 'stocks': 195}, {'code': '308642', 'name': '\u6570\u636e\u4e2d\u5fc3', 'stocks': 188}, ..., {'code': '300008', 'name': '\u65b0\u80fd\u6e90\u6c7d\u8f66', 'stocks': 603} ] ``` Returns: [{'code': '301505', 'name': '\u533b\u7597\u5668\u68b0\u6982\u5ff5'}] \"\"\" if not security : return [] rsp = await cls . _rpc_call ( \"info_by_sec\" , { \"board_type\" : _btype . value , \"security\" : security } ) if rsp [ \"rc\" ] != 200 : return { \"status\" : 500 , \"msg\" : \"httpx RPC call failed\" } return rsp [ \"data\" ] @classmethod async def board_filter_members ( cls , included : List [ str ], excluded : List [ str ] = [], _btype : BoardType = BoardType . 
CONCEPT , ) -> List : \"\"\"\u6839\u636e\u677f\u5757\u540d\u7b5b\u9009\u80a1\u7968\uff0c\u53c2\u6570\u4e3ainclude, exclude Fixme: this function doesn't work Raise status 500 Returns: [['300181', '\u4f50\u529b\u836f\u4e1a'], ['600056', '\u4e2d\u56fd\u533b\u836f']] \"\"\" if not included : return [] if excluded is None : excluded = [] rsp = await cls . _rpc_call ( \"board_filter_members\" , { \"board_type\" : _btype . value , \"include_boards\" : included , \"exclude_boards\" : excluded , }, ) if rsp [ \"rc\" ] != 200 : return { \"status\" : 500 , \"msg\" : \"httpx RPC call failed\" } return rsp [ \"data\" ] @classmethod async def new_concept_boards ( cls , days : int = 10 ): raise NotImplementedError ( \"not ready\" ) @classmethod async def latest_concept_boards ( n : int = 3 ): raise NotImplementedError ( \"not ready\" ) @classmethod async def new_concept_members ( days : int = 10 , prot : int = None ): raise NotImplementedError ( \"not ready\" ) @classmethod async def board_filter ( cls , industry = None , with_concepts : Optional [ List [ str ]] = None , without = [] ): raise NotImplementedError ( \"not ready\" ) @classmethod async def save_bars ( cls , bars ): client = get_influx_client () logger . info ( \"persisting bars to influxdb: %s , %d secs\" , cls . measurement , len ( bars ) ) await client . save ( bars , cls . measurement , tag_keys = [ \"code\" ], time_key = \"frame\" ) return True @classmethod async def get_last_date_of_bars ( cls , code : str ): # \u884c\u4e1a\u677f\u5757\u56de\u6eaf1\u5e74\u7684\u6570\u636e\uff0c\u6982\u5ff5\u677f\u5757\u53ea\u53d6\u5f53\u5e74\u7684\u6570\u636e code = f \" { code } .THS\" client = get_influx_client () now = datetime . datetime . now () dt_end = tf . day_shift ( now , 0 ) # 250 + 60: \u53ef\u4ee5\u5f97\u523060\u4e2aMA250\u7684\u70b9, \u9ed8\u8ba4K\u7ebf\u56fe120\u4e2a\u8282\u70b9 dt_start = tf . day_shift ( now , - 310 ) flux = ( Flux () . measurement ( cls . measurement ) . range ( dt_start , dt_end ) . bucket ( client . _bucket ) . tags ({ \"code\" : code }) ) data = await client . query ( flux ) if len ( data ) == 2 : # \\r\\n return dt_start ds = DataframeDeserializer ( sort_values = \"_time\" , usecols = [ \"_time\" ], time_col = \"_time\" , engine = \"c\" ) bars = ds ( data ) secs = bars . to_records ( index = False ) . astype ( \"datetime64[s]\" ) _dt = secs [ - 1 ] . item () return _dt . date () @classmethod async def get_bars_in_range ( cls , code : str , start : Frame , end : Frame = None ) -> BarsArray : \"\"\"\u4ece\u6301\u4e45\u5316\u6570\u636e\u5e93\u4e2d\u83b7\u53d6\u4ecb\u4e8e[`start`, `end`]\u95f4\u7684\u884c\u60c5\u8bb0\u5f55 Examples: ```python start = datetime.date(2022, 9, 1) # \u8d77\u59cb\u65f6\u95f4\uff0c \u53ef\u4fee\u6539 end = datetime.date(2023, 3, 1) # \u622a\u6b62\u65f6\u95f4\uff0c \u53ef\u4fee\u6539 board_code = '881128' # \u6c7d\u8f66\u670d\u52a1\uff0c \u53ef\u4fee\u6539 bars = await Board.get_bars_in_range(board_code, start, end) bars[-3:] # \u6253\u5370\u540e3\u6761\u6570\u636e # prints: rec.array([ ('2023-02-27T00:00:00', 1117.748, 1124.364, 1108.741, 1109.525, 1.77208600e+08, 1.13933095e+09, 1.), ('2023-02-28T00:00:00', 1112.246, 1119.568, 1109.827, 1113.43 , 1.32828124e+08, 6.65160380e+08, 1.), ('2023-03-01T00:00:00', 1122.233, 1123.493, 1116.62 , 1123.274, 7.21718910e+07, 3.71172850e+08, 1.) 
], dtype=[('frame', ' ) async classmethod \u00b6 \u6839\u636e\u677f\u5757\u540d\u7b5b\u9009\u80a1\u7968\uff0c\u53c2\u6570\u4e3ainclude, exclude Fixme this function doesn't work Raise status 500 Returns: Type Description List [['300181', '\u4f50\u529b\u836f\u4e1a'], ['600056', '\u4e2d\u56fd\u533b\u836f']] Source code in omicron/models/board.py @classmethod async def board_filter_members ( cls , included : List [ str ], excluded : List [ str ] = [], _btype : BoardType = BoardType . CONCEPT , ) -> List : \"\"\"\u6839\u636e\u677f\u5757\u540d\u7b5b\u9009\u80a1\u7968\uff0c\u53c2\u6570\u4e3ainclude, exclude Fixme: this function doesn't work Raise status 500 Returns: [['300181', '\u4f50\u529b\u836f\u4e1a'], ['600056', '\u4e2d\u56fd\u533b\u836f']] \"\"\" if not included : return [] if excluded is None : excluded = [] rsp = await cls . _rpc_call ( \"board_filter_members\" , { \"board_type\" : _btype . value , \"include_boards\" : included , \"exclude_boards\" : excluded , }, ) if rsp [ \"rc\" ] != 200 : return { \"status\" : 500 , \"msg\" : \"httpx RPC call failed\" } return rsp [ \"data\" ] board_info_by_id ( board_id , full_mode = False ) async classmethod \u00b6 \u901a\u8fc7\u677f\u5757\u4ee3\u7801\u67e5\u8be2\u677f\u5757\u4fe1\u606f\uff08\u540d\u5b57\uff0c\u6210\u5458\u6570\u76ee\u6216\u6e05\u5355\uff09 Examples: 1 2 3 4 5 6 board_code = '881128' # \u6c7d\u8f66\u670d\u52a1 \u53ef\u81ea\u884c\u4fee\u6539 board_info = await Board . board_info_by_id ( board_code ) print ( board_info ) # \u5b57\u5178\u5f62\u5f0f # returns { 'code' : '881128' , 'name' : '\u6c7d\u8f66\u670d\u52a1' , 'stocks' : 14 } Returns: Type Description {'code' '301505', 'name': '\u533b\u7597\u5668\u68b0\u6982\u5ff5', 'stocks': 242} or Source code in omicron/models/board.py @classmethod async def board_info_by_id ( cls , board_id : str , full_mode : bool = False ) -> dict : \"\"\"\u901a\u8fc7\u677f\u5757\u4ee3\u7801\u67e5\u8be2\u677f\u5757\u4fe1\u606f\uff08\u540d\u5b57\uff0c\u6210\u5458\u6570\u76ee\u6216\u6e05\u5355\uff09 Examples: ```python board_code = '881128' # \u6c7d\u8f66\u670d\u52a1 \u53ef\u81ea\u884c\u4fee\u6539 board_info = await Board.board_info_by_id(board_code) print(board_info) # \u5b57\u5178\u5f62\u5f0f # returns {'code': '881128', 'name': '\u6c7d\u8f66\u670d\u52a1', 'stocks': 14} ``` Returns: {'code': '301505', 'name': '\u533b\u7597\u5668\u68b0\u6982\u5ff5', 'stocks': 242} or {'code': '301505', 'name': '\u533b\u7597\u5668\u68b0\u6982\u5ff5', 'stocks': [['300916', '\u6717\u7279\u667a\u80fd'], ['300760', '\u8fc8\u745e\u533b\u7597']]} \"\"\" if not board_id : return {} if board_id [ 0 ] == \"3\" : _btype = BoardType . CONCEPT else : _btype = BoardType . INDUSTRY _mode = 0 if full_mode : # \u8f6c\u6362bool\u7c7b\u578b _mode = 1 rsp = await cls . _rpc_call ( \"info\" , { \"board_type\" : _btype . value , \"board_id\" : board_id , \"fullmode\" : _mode }, ) if rsp [ \"rc\" ] != 200 : return { \"status\" : 500 , \"msg\" : \"httpx RPC call failed\" } return rsp [ \"data\" ] board_info_by_security ( security , _btype =< BoardType . CONCEPT : 'concept' > ) async classmethod \u00b6 \u83b7\u53d6\u80a1\u7968\u6240\u5728\u677f\u5757\u4fe1\u606f\uff1a\u540d\u79f0\uff0c\u4ee3\u7801 Examples: 1 2 3 4 5 6 7 8 9 10 11 12 stock_code = '002236' # \u5927\u534e\u80a1\u4efd\uff0c\u80a1\u7968\u4ee3\u7801\u4e0d\u5e26\u5b57\u6bcd\u540e\u7f00 stock_in_board = await Board . board_info_by_security ( stock_code , _btype = BoardType . 
CONCEPT ) print ( stock_in_board ) # returns: [ { 'code' : '301715' , 'name' : '\u8bc1\u91d1\u6301\u80a1' , 'stocks' : 208 }, { 'code' : '308870' , 'name' : '\u6570\u5b57\u7ecf\u6d4e' , 'stocks' : 195 }, { 'code' : '308642' , 'name' : '\u6570\u636e\u4e2d\u5fc3' , 'stocks' : 188 }, ... , { 'code' : '300008' , 'name' : '\u65b0\u80fd\u6e90\u6c7d\u8f66' , 'stocks' : 603 } ] Returns: Type Description [{'code' '301505', 'name': '\u533b\u7597\u5668\u68b0\u6982\u5ff5'}] Source code in omicron/models/board.py @classmethod async def board_info_by_security ( cls , security : str , _btype : BoardType = BoardType . CONCEPT ) -> List [ dict ]: \"\"\"\u83b7\u53d6\u80a1\u7968\u6240\u5728\u677f\u5757\u4fe1\u606f\uff1a\u540d\u79f0\uff0c\u4ee3\u7801 Examples: ```python stock_code = '002236' # \u5927\u534e\u80a1\u4efd\uff0c\u80a1\u7968\u4ee3\u7801\u4e0d\u5e26\u5b57\u6bcd\u540e\u7f00 stock_in_board = await Board.board_info_by_security(stock_code, _btype=BoardType.CONCEPT) print(stock_in_board) # returns: [ {'code': '301715', 'name': '\u8bc1\u91d1\u6301\u80a1', 'stocks': 208}, {'code': '308870', 'name': '\u6570\u5b57\u7ecf\u6d4e', 'stocks': 195}, {'code': '308642', 'name': '\u6570\u636e\u4e2d\u5fc3', 'stocks': 188}, ..., {'code': '300008', 'name': '\u65b0\u80fd\u6e90\u6c7d\u8f66', 'stocks': 603} ] ``` Returns: [{'code': '301505', 'name': '\u533b\u7597\u5668\u68b0\u6982\u5ff5'}] \"\"\" if not security : return [] rsp = await cls . _rpc_call ( \"info_by_sec\" , { \"board_type\" : _btype . value , \"security\" : security } ) if rsp [ \"rc\" ] != 200 : return { \"status\" : 500 , \"msg\" : \"httpx RPC call failed\" } return rsp [ \"data\" ] board_list ( _btype =< BoardType . CONCEPT : 'concept' > ) async classmethod \u00b6 \u83b7\u53d6\u677f\u5757\u5217\u8868 Parameters: Name Type Description Default _btype BoardType \u677f\u5757\u7c7b\u522b\uff0c\u53ef\u9009\u503c BoardType.CONCEPT \u548c BoardType.INDUSTRY . Returns: Type Description List[List] \u677f\u5757\u5217\u8868\u3002\u6bcf\u4e00\u4e2a\u5b50\u5143\u7d20\u4ecd\u4e3a\u4e00\u4e2a\u5217\u8868\uff0c\u7531\u677f\u5757\u4ee3\u7801(str), \u677f\u5757\u540d\u79f0(str)\u548c\u6210\u5458\u6570\u7ec4\u6210\u3002\u793a\u4f8b\uff1a 1 2 3 4 5 6 [ ['881101', '\u79cd\u690d\u4e1a\u4e0e\u6797\u4e1a', 24], ['881102', '\u517b\u6b96\u4e1a', 27], ['881103', '\u519c\u4ea7\u54c1\u52a0\u5de5', 41], ['881104', '\u519c\u4e1a\u670d\u52a1', 16], ] Source code in omicron/models/board.py @classmethod async def board_list ( cls , _btype : BoardType = BoardType . CONCEPT ) -> List [ List ]: \"\"\"\u83b7\u53d6\u677f\u5757\u5217\u8868 Args: _btype: \u677f\u5757\u7c7b\u522b\uff0c\u53ef\u9009\u503c`BoardType.CONCEPT`\u548c`BoardType.INDUSTRY`. Returns: \u677f\u5757\u5217\u8868\u3002\u6bcf\u4e00\u4e2a\u5b50\u5143\u7d20\u4ecd\u4e3a\u4e00\u4e2a\u5217\u8868\uff0c\u7531\u677f\u5757\u4ee3\u7801(str), \u677f\u5757\u540d\u79f0(str)\u548c\u6210\u5458\u6570\u7ec4\u6210\u3002\u793a\u4f8b\uff1a ``` [ ['881101', '\u79cd\u690d\u4e1a\u4e0e\u6797\u4e1a', 24], ['881102', '\u517b\u6b96\u4e1a', 27], ['881103', '\u519c\u4ea7\u54c1\u52a0\u5de5', 41], ['881104', '\u519c\u4e1a\u670d\u52a1', 16], ] ``` \"\"\" rsp = await cls . _rpc_call ( \"board_list\" , { \"board_type\" : _btype . value }) if rsp [ \"rc\" ] != 200 : return { \"status\" : 500 , \"msg\" : \"httpx RPC call failed\" } return rsp [ \"data\" ] fuzzy_match_board_name ( pattern , _btype =< BoardType . 
CONCEPT : 'concept' > ) async classmethod \u00b6 \u6a21\u7cca\u67e5\u8be2\u677f\u5757\u4ee3\u7801\u7684\u540d\u5b57 Examples: 1 2 3 4 5 6 7 8 9 10 11 await Board . fuzzy_match_board_name ( \"\u6c7d\u8f66\" , BoardType . INDUSTRY ) # returns: [ '881125 \u6c7d\u8f66\u6574\u8f66' , '881126 \u6c7d\u8f66\u96f6\u90e8\u4ef6' , '881127 \u975e\u6c7d\u8f66\u4ea4\u8fd0' , '881128 \u6c7d\u8f66\u670d\u52a1' , '884107 \u6c7d\u8f66\u670d\u52a1\u2162' , '884194 \u6c7d\u8f66\u96f6\u90e8\u4ef6\u2162' ] Parameters: Name Type Description Default pattern str \u5f85\u67e5\u8be2\u6a21\u5f0f\u4e32 required _btype BoardType \u67e5\u8be2\u7c7b\u578b Returns: Type Description \u5305\u542b\u4ee5\u4e0bkey\u7684dict code(\u677f\u5757\u4ee3\u7801), name\uff08\u677f\u5757\u540d\uff09, stocks(\u80a1\u7968\u6570) Source code in omicron/models/board.py @classmethod async def fuzzy_match_board_name ( cls , pattern : str , _btype : BoardType = BoardType . CONCEPT ) -> dict : \"\"\"\u6a21\u7cca\u67e5\u8be2\u677f\u5757\u4ee3\u7801\u7684\u540d\u5b57 Examples: ```python await Board.fuzzy_match_board_name(\"\u6c7d\u8f66\", BoardType.INDUSTRY) # returns: [ '881125 \u6c7d\u8f66\u6574\u8f66', '881126 \u6c7d\u8f66\u96f6\u90e8\u4ef6', '881127 \u975e\u6c7d\u8f66\u4ea4\u8fd0', '881128 \u6c7d\u8f66\u670d\u52a1', '884107 \u6c7d\u8f66\u670d\u52a1\u2162', '884194 \u6c7d\u8f66\u96f6\u90e8\u4ef6\u2162' ] ``` Args: pattern: \u5f85\u67e5\u8be2\u6a21\u5f0f\u4e32 _btype: \u67e5\u8be2\u7c7b\u578b Returns: \u5305\u542b\u4ee5\u4e0bkey\u7684dict: code(\u677f\u5757\u4ee3\u7801), name\uff08\u677f\u5757\u540d\uff09, stocks(\u80a1\u7968\u6570) \"\"\" if not pattern : return [] rsp = await cls . _rpc_call ( \"fuzzy_match_name\" , { \"board_type\" : _btype . value , \"pattern\" : pattern } ) if rsp [ \"rc\" ] != 200 : return { \"status\" : 500 , \"msg\" : \"httpx RPC call failed\" } return rsp [ \"data\" ] get_bars_in_range ( code , start , end = None ) async classmethod \u00b6 \u4ece\u6301\u4e45\u5316\u6570\u636e\u5e93\u4e2d\u83b7\u53d6\u4ecb\u4e8e[ start , end ]\u95f4\u7684\u884c\u60c5\u8bb0\u5f55 Examples: 1 2 3 4 5 6 7 8 9 10 11 12 13 start = datetime . date ( 2022 , 9 , 1 ) # \u8d77\u59cb\u65f6\u95f4\uff0c \u53ef\u4fee\u6539 end = datetime . date ( 2023 , 3 , 1 ) # \u622a\u6b62\u65f6\u95f4\uff0c \u53ef\u4fee\u6539 board_code = '881128' # \u6c7d\u8f66\u670d\u52a1\uff0c \u53ef\u4fee\u6539 bars = await Board . get_bars_in_range ( board_code , start , end ) bars [ - 3 :] # \u6253\u5370\u540e3\u6761\u6570\u636e # prints: rec . array ([ ( '2023-02-27T00:00:00' , 1117.748 , 1124.364 , 1108.741 , 1109.525 , 1.77208600e+08 , 1.13933095e+09 , 1. ), ( '2023-02-28T00:00:00' , 1112.246 , 1119.568 , 1109.827 , 1113.43 , 1.32828124e+08 , 6.65160380e+08 , 1. ), ( '2023-03-01T00:00:00' , 1122.233 , 1123.493 , 1116.62 , 1123.274 , 7.21718910e+07 , 3.71172850e+08 , 1. 
) ], dtype = [( 'frame' , ' BarsArray : \"\"\"\u4ece\u6301\u4e45\u5316\u6570\u636e\u5e93\u4e2d\u83b7\u53d6\u4ecb\u4e8e[`start`, `end`]\u95f4\u7684\u884c\u60c5\u8bb0\u5f55 Examples: ```python start = datetime.date(2022, 9, 1) # \u8d77\u59cb\u65f6\u95f4\uff0c \u53ef\u4fee\u6539 end = datetime.date(2023, 3, 1) # \u622a\u6b62\u65f6\u95f4\uff0c \u53ef\u4fee\u6539 board_code = '881128' # \u6c7d\u8f66\u670d\u52a1\uff0c \u53ef\u4fee\u6539 bars = await Board.get_bars_in_range(board_code, start, end) bars[-3:] # \u6253\u5370\u540e3\u6761\u6570\u636e # prints: rec.array([ ('2023-02-27T00:00:00', 1117.748, 1124.364, 1108.741, 1109.525, 1.77208600e+08, 1.13933095e+09, 1.), ('2023-02-28T00:00:00', 1112.246, 1119.568, 1109.827, 1113.43 , 1.32828124e+08, 6.65160380e+08, 1.), ('2023-03-01T00:00:00', 1122.233, 1123.493, 1116.62 , 1123.274, 7.21718910e+07, 3.71172850e+08, 1.) ], dtype=[('frame', ' List [ List ]: \"\"\"\u83b7\u53d6\u677f\u5757\u5217\u8868 Args: _btype: \u677f\u5757\u7c7b\u522b\uff0c\u53ef\u9009\u503c`BoardType.CONCEPT`\u548c`BoardType.INDUSTRY`. Returns: \u677f\u5757\u5217\u8868\u3002\u6bcf\u4e00\u4e2a\u5b50\u5143\u7d20\u4ecd\u4e3a\u4e00\u4e2a\u5217\u8868\uff0c\u7531\u677f\u5757\u4ee3\u7801(str), \u677f\u5757\u540d\u79f0(str)\u548c\u6210\u5458\u6570\u7ec4\u6210\u3002\u793a\u4f8b\uff1a ``` [ ['881101', '\u79cd\u690d\u4e1a\u4e0e\u6797\u4e1a', 24], ['881102', '\u517b\u6b96\u4e1a', 27], ['881103', '\u519c\u4ea7\u54c1\u52a0\u5de5', 41], ['881104', '\u519c\u4e1a\u670d\u52a1', 16], ] ``` \"\"\" rsp = await cls . _rpc_call ( \"board_list\" , { \"board_type\" : _btype . value }) if rsp [ \"rc\" ] != 200 : return { \"status\" : 500 , \"msg\" : \"httpx RPC call failed\" } return rsp [ \"data\" ] @classmethod async def fuzzy_match_board_name ( cls , pattern : str , _btype : BoardType = BoardType . CONCEPT ) -> dict : \"\"\"\u6a21\u7cca\u67e5\u8be2\u677f\u5757\u4ee3\u7801\u7684\u540d\u5b57 Examples: ```python await Board.fuzzy_match_board_name(\"\u6c7d\u8f66\", BoardType.INDUSTRY) # returns: [ '881125 \u6c7d\u8f66\u6574\u8f66', '881126 \u6c7d\u8f66\u96f6\u90e8\u4ef6', '881127 \u975e\u6c7d\u8f66\u4ea4\u8fd0', '881128 \u6c7d\u8f66\u670d\u52a1', '884107 \u6c7d\u8f66\u670d\u52a1\u2162', '884194 \u6c7d\u8f66\u96f6\u90e8\u4ef6\u2162' ] ``` Args: pattern: \u5f85\u67e5\u8be2\u6a21\u5f0f\u4e32 _btype: \u67e5\u8be2\u7c7b\u578b Returns: \u5305\u542b\u4ee5\u4e0bkey\u7684dict: code(\u677f\u5757\u4ee3\u7801), name\uff08\u677f\u5757\u540d\uff09, stocks(\u80a1\u7968\u6570) \"\"\" if not pattern : return [] rsp = await cls . _rpc_call ( \"fuzzy_match_name\" , { \"board_type\" : _btype . 
value , \"pattern\" : pattern } ) if rsp [ \"rc\" ] != 200 : return { \"status\" : 500 , \"msg\" : \"httpx RPC call failed\" } return rsp [ \"data\" ] @classmethod async def board_info_by_id ( cls , board_id : str , full_mode : bool = False ) -> dict : \"\"\"\u901a\u8fc7\u677f\u5757\u4ee3\u7801\u67e5\u8be2\u677f\u5757\u4fe1\u606f\uff08\u540d\u5b57\uff0c\u6210\u5458\u6570\u76ee\u6216\u6e05\u5355\uff09 Examples: ```python board_code = '881128' # \u6c7d\u8f66\u670d\u52a1 \u53ef\u81ea\u884c\u4fee\u6539 board_info = await Board.board_info_by_id(board_code) print(board_info) # \u5b57\u5178\u5f62\u5f0f # returns {'code': '881128', 'name': '\u6c7d\u8f66\u670d\u52a1', 'stocks': 14} ``` Returns: {'code': '301505', 'name': '\u533b\u7597\u5668\u68b0\u6982\u5ff5', 'stocks': 242} or {'code': '301505', 'name': '\u533b\u7597\u5668\u68b0\u6982\u5ff5', 'stocks': [['300916', '\u6717\u7279\u667a\u80fd'], ['300760', '\u8fc8\u745e\u533b\u7597']]} \"\"\" if not board_id : return {} if board_id [ 0 ] == \"3\" : _btype = BoardType . CONCEPT else : _btype = BoardType . INDUSTRY _mode = 0 if full_mode : # \u8f6c\u6362bool\u7c7b\u578b _mode = 1 rsp = await cls . _rpc_call ( \"info\" , { \"board_type\" : _btype . value , \"board_id\" : board_id , \"fullmode\" : _mode }, ) if rsp [ \"rc\" ] != 200 : return { \"status\" : 500 , \"msg\" : \"httpx RPC call failed\" } return rsp [ \"data\" ] @classmethod async def board_info_by_security ( cls , security : str , _btype : BoardType = BoardType . CONCEPT ) -> List [ dict ]: \"\"\"\u83b7\u53d6\u80a1\u7968\u6240\u5728\u677f\u5757\u4fe1\u606f\uff1a\u540d\u79f0\uff0c\u4ee3\u7801 Examples: ```python stock_code = '002236' # \u5927\u534e\u80a1\u4efd\uff0c\u80a1\u7968\u4ee3\u7801\u4e0d\u5e26\u5b57\u6bcd\u540e\u7f00 stock_in_board = await Board.board_info_by_security(stock_code, _btype=BoardType.CONCEPT) print(stock_in_board) # returns: [ {'code': '301715', 'name': '\u8bc1\u91d1\u6301\u80a1', 'stocks': 208}, {'code': '308870', 'name': '\u6570\u5b57\u7ecf\u6d4e', 'stocks': 195}, {'code': '308642', 'name': '\u6570\u636e\u4e2d\u5fc3', 'stocks': 188}, ..., {'code': '300008', 'name': '\u65b0\u80fd\u6e90\u6c7d\u8f66', 'stocks': 603} ] ``` Returns: [{'code': '301505', 'name': '\u533b\u7597\u5668\u68b0\u6982\u5ff5'}] \"\"\" if not security : return [] rsp = await cls . _rpc_call ( \"info_by_sec\" , { \"board_type\" : _btype . value , \"security\" : security } ) if rsp [ \"rc\" ] != 200 : return { \"status\" : 500 , \"msg\" : \"httpx RPC call failed\" } return rsp [ \"data\" ] @classmethod async def board_filter_members ( cls , included : List [ str ], excluded : List [ str ] = [], _btype : BoardType = BoardType . CONCEPT , ) -> List : \"\"\"\u6839\u636e\u677f\u5757\u540d\u7b5b\u9009\u80a1\u7968\uff0c\u53c2\u6570\u4e3ainclude, exclude Fixme: this function doesn't work Raise status 500 Returns: [['300181', '\u4f50\u529b\u836f\u4e1a'], ['600056', '\u4e2d\u56fd\u533b\u836f']] \"\"\" if not included : return [] if excluded is None : excluded = [] rsp = await cls . _rpc_call ( \"board_filter_members\" , { \"board_type\" : _btype . 
value , \"include_boards\" : included , \"exclude_boards\" : excluded , }, ) if rsp [ \"rc\" ] != 200 : return { \"status\" : 500 , \"msg\" : \"httpx RPC call failed\" } return rsp [ \"data\" ] @classmethod async def new_concept_boards ( cls , days : int = 10 ): raise NotImplementedError ( \"not ready\" ) @classmethod async def latest_concept_boards ( n : int = 3 ): raise NotImplementedError ( \"not ready\" ) @classmethod async def new_concept_members ( days : int = 10 , prot : int = None ): raise NotImplementedError ( \"not ready\" ) @classmethod async def board_filter ( cls , industry = None , with_concepts : Optional [ List [ str ]] = None , without = [] ): raise NotImplementedError ( \"not ready\" ) @classmethod async def save_bars ( cls , bars ): client = get_influx_client () logger . info ( \"persisting bars to influxdb: %s , %d secs\" , cls . measurement , len ( bars ) ) await client . save ( bars , cls . measurement , tag_keys = [ \"code\" ], time_key = \"frame\" ) return True @classmethod async def get_last_date_of_bars ( cls , code : str ): # \u884c\u4e1a\u677f\u5757\u56de\u6eaf1\u5e74\u7684\u6570\u636e\uff0c\u6982\u5ff5\u677f\u5757\u53ea\u53d6\u5f53\u5e74\u7684\u6570\u636e code = f \" { code } .THS\" client = get_influx_client () now = datetime . datetime . now () dt_end = tf . day_shift ( now , 0 ) # 250 + 60: \u53ef\u4ee5\u5f97\u523060\u4e2aMA250\u7684\u70b9, \u9ed8\u8ba4K\u7ebf\u56fe120\u4e2a\u8282\u70b9 dt_start = tf . day_shift ( now , - 310 ) flux = ( Flux () . measurement ( cls . measurement ) . range ( dt_start , dt_end ) . bucket ( client . _bucket ) . tags ({ \"code\" : code }) ) data = await client . query ( flux ) if len ( data ) == 2 : # \\r\\n return dt_start ds = DataframeDeserializer ( sort_values = \"_time\" , usecols = [ \"_time\" ], time_col = \"_time\" , engine = \"c\" ) bars = ds ( data ) secs = bars . to_records ( index = False ) . astype ( \"datetime64[s]\" ) _dt = secs [ - 1 ] . item () return _dt . date () @classmethod async def get_bars_in_range ( cls , code : str , start : Frame , end : Frame = None ) -> BarsArray : \"\"\"\u4ece\u6301\u4e45\u5316\u6570\u636e\u5e93\u4e2d\u83b7\u53d6\u4ecb\u4e8e[`start`, `end`]\u95f4\u7684\u884c\u60c5\u8bb0\u5f55 Examples: ```python start = datetime.date(2022, 9, 1) # \u8d77\u59cb\u65f6\u95f4\uff0c \u53ef\u4fee\u6539 end = datetime.date(2023, 3, 1) # \u622a\u6b62\u65f6\u95f4\uff0c \u53ef\u4fee\u6539 board_code = '881128' # \u6c7d\u8f66\u670d\u52a1\uff0c \u53ef\u4fee\u6539 bars = await Board.get_bars_in_range(board_code, start, end) bars[-3:] # \u6253\u5370\u540e3\u6761\u6570\u636e # prints: rec.array([ ('2023-02-27T00:00:00', 1117.748, 1124.364, 1108.741, 1109.525, 1.77208600e+08, 1.13933095e+09, 1.), ('2023-02-28T00:00:00', 1112.246, 1119.568, 1109.827, 1113.43 , 1.32828124e+08, 6.65160380e+08, 1.), ('2023-03-01T00:00:00', 1122.233, 1123.493, 1116.62 , 1123.274, 7.21718910e+07, 3.71172850e+08, 1.) ], dtype=[('frame', ' List : \"\"\"\u6839\u636e\u677f\u5757\u540d\u7b5b\u9009\u80a1\u7968\uff0c\u53c2\u6570\u4e3ainclude, exclude Fixme: this function doesn't work Raise status 500 Returns: [['300181', '\u4f50\u529b\u836f\u4e1a'], ['600056', '\u4e2d\u56fd\u533b\u836f']] \"\"\" if not included : return [] if excluded is None : excluded = [] rsp = await cls . _rpc_call ( \"board_filter_members\" , { \"board_type\" : _btype . 
value , \"include_boards\" : included , \"exclude_boards\" : excluded , }, ) if rsp [ \"rc\" ] != 200 : return { \"status\" : 500 , \"msg\" : \"httpx RPC call failed\" } return rsp [ \"data\" ]","title":"board_filter_members()"},{"location":"api/board/#omicron.models.board.Board.board_info_by_id","text":"\u901a\u8fc7\u677f\u5757\u4ee3\u7801\u67e5\u8be2\u677f\u5757\u4fe1\u606f\uff08\u540d\u5b57\uff0c\u6210\u5458\u6570\u76ee\u6216\u6e05\u5355\uff09 Examples: 1 2 3 4 5 6 board_code = '881128' # \u6c7d\u8f66\u670d\u52a1 \u53ef\u81ea\u884c\u4fee\u6539 board_info = await Board . board_info_by_id ( board_code ) print ( board_info ) # \u5b57\u5178\u5f62\u5f0f # returns { 'code' : '881128' , 'name' : '\u6c7d\u8f66\u670d\u52a1' , 'stocks' : 14 } Returns: Type Description {'code' '301505', 'name': '\u533b\u7597\u5668\u68b0\u6982\u5ff5', 'stocks': 242} or Source code in omicron/models/board.py @classmethod async def board_info_by_id ( cls , board_id : str , full_mode : bool = False ) -> dict : \"\"\"\u901a\u8fc7\u677f\u5757\u4ee3\u7801\u67e5\u8be2\u677f\u5757\u4fe1\u606f\uff08\u540d\u5b57\uff0c\u6210\u5458\u6570\u76ee\u6216\u6e05\u5355\uff09 Examples: ```python board_code = '881128' # \u6c7d\u8f66\u670d\u52a1 \u53ef\u81ea\u884c\u4fee\u6539 board_info = await Board.board_info_by_id(board_code) print(board_info) # \u5b57\u5178\u5f62\u5f0f # returns {'code': '881128', 'name': '\u6c7d\u8f66\u670d\u52a1', 'stocks': 14} ``` Returns: {'code': '301505', 'name': '\u533b\u7597\u5668\u68b0\u6982\u5ff5', 'stocks': 242} or {'code': '301505', 'name': '\u533b\u7597\u5668\u68b0\u6982\u5ff5', 'stocks': [['300916', '\u6717\u7279\u667a\u80fd'], ['300760', '\u8fc8\u745e\u533b\u7597']]} \"\"\" if not board_id : return {} if board_id [ 0 ] == \"3\" : _btype = BoardType . CONCEPT else : _btype = BoardType . INDUSTRY _mode = 0 if full_mode : # \u8f6c\u6362bool\u7c7b\u578b _mode = 1 rsp = await cls . _rpc_call ( \"info\" , { \"board_type\" : _btype . value , \"board_id\" : board_id , \"fullmode\" : _mode }, ) if rsp [ \"rc\" ] != 200 : return { \"status\" : 500 , \"msg\" : \"httpx RPC call failed\" } return rsp [ \"data\" ]","title":"board_info_by_id()"},{"location":"api/board/#omicron.models.board.Board.board_info_by_security","text":"\u83b7\u53d6\u80a1\u7968\u6240\u5728\u677f\u5757\u4fe1\u606f\uff1a\u540d\u79f0\uff0c\u4ee3\u7801 Examples: 1 2 3 4 5 6 7 8 9 10 11 12 stock_code = '002236' # \u5927\u534e\u80a1\u4efd\uff0c\u80a1\u7968\u4ee3\u7801\u4e0d\u5e26\u5b57\u6bcd\u540e\u7f00 stock_in_board = await Board . board_info_by_security ( stock_code , _btype = BoardType . CONCEPT ) print ( stock_in_board ) # returns: [ { 'code' : '301715' , 'name' : '\u8bc1\u91d1\u6301\u80a1' , 'stocks' : 208 }, { 'code' : '308870' , 'name' : '\u6570\u5b57\u7ecf\u6d4e' , 'stocks' : 195 }, { 'code' : '308642' , 'name' : '\u6570\u636e\u4e2d\u5fc3' , 'stocks' : 188 }, ... , { 'code' : '300008' , 'name' : '\u65b0\u80fd\u6e90\u6c7d\u8f66' , 'stocks' : 603 } ] Returns: Type Description [{'code' '301505', 'name': '\u533b\u7597\u5668\u68b0\u6982\u5ff5'}] Source code in omicron/models/board.py @classmethod async def board_info_by_security ( cls , security : str , _btype : BoardType = BoardType . 
CONCEPT ) -> List [ dict ]: \"\"\"\u83b7\u53d6\u80a1\u7968\u6240\u5728\u677f\u5757\u4fe1\u606f\uff1a\u540d\u79f0\uff0c\u4ee3\u7801 Examples: ```python stock_code = '002236' # \u5927\u534e\u80a1\u4efd\uff0c\u80a1\u7968\u4ee3\u7801\u4e0d\u5e26\u5b57\u6bcd\u540e\u7f00 stock_in_board = await Board.board_info_by_security(stock_code, _btype=BoardType.CONCEPT) print(stock_in_board) # returns: [ {'code': '301715', 'name': '\u8bc1\u91d1\u6301\u80a1', 'stocks': 208}, {'code': '308870', 'name': '\u6570\u5b57\u7ecf\u6d4e', 'stocks': 195}, {'code': '308642', 'name': '\u6570\u636e\u4e2d\u5fc3', 'stocks': 188}, ..., {'code': '300008', 'name': '\u65b0\u80fd\u6e90\u6c7d\u8f66', 'stocks': 603} ] ``` Returns: [{'code': '301505', 'name': '\u533b\u7597\u5668\u68b0\u6982\u5ff5'}] \"\"\" if not security : return [] rsp = await cls . _rpc_call ( \"info_by_sec\" , { \"board_type\" : _btype . value , \"security\" : security } ) if rsp [ \"rc\" ] != 200 : return { \"status\" : 500 , \"msg\" : \"httpx RPC call failed\" } return rsp [ \"data\" ]","title":"board_info_by_security()"},{"location":"api/board/#omicron.models.board.Board.board_list","text":"\u83b7\u53d6\u677f\u5757\u5217\u8868 Parameters: Name Type Description Default _btype BoardType \u677f\u5757\u7c7b\u522b\uff0c\u53ef\u9009\u503c BoardType.CONCEPT \u548c BoardType.INDUSTRY . Returns: Type Description List[List] \u677f\u5757\u5217\u8868\u3002\u6bcf\u4e00\u4e2a\u5b50\u5143\u7d20\u4ecd\u4e3a\u4e00\u4e2a\u5217\u8868\uff0c\u7531\u677f\u5757\u4ee3\u7801(str), \u677f\u5757\u540d\u79f0(str)\u548c\u6210\u5458\u6570\u7ec4\u6210\u3002\u793a\u4f8b\uff1a 1 2 3 4 5 6 [ ['881101', '\u79cd\u690d\u4e1a\u4e0e\u6797\u4e1a', 24], ['881102', '\u517b\u6b96\u4e1a', 27], ['881103', '\u519c\u4ea7\u54c1\u52a0\u5de5', 41], ['881104', '\u519c\u4e1a\u670d\u52a1', 16], ] Source code in omicron/models/board.py @classmethod async def board_list ( cls , _btype : BoardType = BoardType . CONCEPT ) -> List [ List ]: \"\"\"\u83b7\u53d6\u677f\u5757\u5217\u8868 Args: _btype: \u677f\u5757\u7c7b\u522b\uff0c\u53ef\u9009\u503c`BoardType.CONCEPT`\u548c`BoardType.INDUSTRY`. Returns: \u677f\u5757\u5217\u8868\u3002\u6bcf\u4e00\u4e2a\u5b50\u5143\u7d20\u4ecd\u4e3a\u4e00\u4e2a\u5217\u8868\uff0c\u7531\u677f\u5757\u4ee3\u7801(str), \u677f\u5757\u540d\u79f0(str)\u548c\u6210\u5458\u6570\u7ec4\u6210\u3002\u793a\u4f8b\uff1a ``` [ ['881101', '\u79cd\u690d\u4e1a\u4e0e\u6797\u4e1a', 24], ['881102', '\u517b\u6b96\u4e1a', 27], ['881103', '\u519c\u4ea7\u54c1\u52a0\u5de5', 41], ['881104', '\u519c\u4e1a\u670d\u52a1', 16], ] ``` \"\"\" rsp = await cls . _rpc_call ( \"board_list\" , { \"board_type\" : _btype . value }) if rsp [ \"rc\" ] != 200 : return { \"status\" : 500 , \"msg\" : \"httpx RPC call failed\" } return rsp [ \"data\" ]","title":"board_list()"},{"location":"api/board/#omicron.models.board.Board.fuzzy_match_board_name","text":"\u6a21\u7cca\u67e5\u8be2\u677f\u5757\u4ee3\u7801\u7684\u540d\u5b57 Examples: 1 2 3 4 5 6 7 8 9 10 11 await Board . fuzzy_match_board_name ( \"\u6c7d\u8f66\" , BoardType . 
INDUSTRY ) # returns: [ '881125 \u6c7d\u8f66\u6574\u8f66' , '881126 \u6c7d\u8f66\u96f6\u90e8\u4ef6' , '881127 \u975e\u6c7d\u8f66\u4ea4\u8fd0' , '881128 \u6c7d\u8f66\u670d\u52a1' , '884107 \u6c7d\u8f66\u670d\u52a1\u2162' , '884194 \u6c7d\u8f66\u96f6\u90e8\u4ef6\u2162' ] Parameters: Name Type Description Default pattern str \u5f85\u67e5\u8be2\u6a21\u5f0f\u4e32 required _btype BoardType \u67e5\u8be2\u7c7b\u578b Returns: Type Description \u5305\u542b\u4ee5\u4e0bkey\u7684dict code(\u677f\u5757\u4ee3\u7801), name\uff08\u677f\u5757\u540d\uff09, stocks(\u80a1\u7968\u6570) Source code in omicron/models/board.py @classmethod async def fuzzy_match_board_name ( cls , pattern : str , _btype : BoardType = BoardType . CONCEPT ) -> dict : \"\"\"\u6a21\u7cca\u67e5\u8be2\u677f\u5757\u4ee3\u7801\u7684\u540d\u5b57 Examples: ```python await Board.fuzzy_match_board_name(\"\u6c7d\u8f66\", BoardType.INDUSTRY) # returns: [ '881125 \u6c7d\u8f66\u6574\u8f66', '881126 \u6c7d\u8f66\u96f6\u90e8\u4ef6', '881127 \u975e\u6c7d\u8f66\u4ea4\u8fd0', '881128 \u6c7d\u8f66\u670d\u52a1', '884107 \u6c7d\u8f66\u670d\u52a1\u2162', '884194 \u6c7d\u8f66\u96f6\u90e8\u4ef6\u2162' ] ``` Args: pattern: \u5f85\u67e5\u8be2\u6a21\u5f0f\u4e32 _btype: \u67e5\u8be2\u7c7b\u578b Returns: \u5305\u542b\u4ee5\u4e0bkey\u7684dict: code(\u677f\u5757\u4ee3\u7801), name\uff08\u677f\u5757\u540d\uff09, stocks(\u80a1\u7968\u6570) \"\"\" if not pattern : return [] rsp = await cls . _rpc_call ( \"fuzzy_match_name\" , { \"board_type\" : _btype . value , \"pattern\" : pattern } ) if rsp [ \"rc\" ] != 200 : return { \"status\" : 500 , \"msg\" : \"httpx RPC call failed\" } return rsp [ \"data\" ]","title":"fuzzy_match_board_name()"},{"location":"api/board/#omicron.models.board.Board.get_bars_in_range","text":"\u4ece\u6301\u4e45\u5316\u6570\u636e\u5e93\u4e2d\u83b7\u53d6\u4ecb\u4e8e[ start , end ]\u95f4\u7684\u884c\u60c5\u8bb0\u5f55 Examples: 1 2 3 4 5 6 7 8 9 10 11 12 13 start = datetime . date ( 2022 , 9 , 1 ) # \u8d77\u59cb\u65f6\u95f4\uff0c \u53ef\u4fee\u6539 end = datetime . date ( 2023 , 3 , 1 ) # \u622a\u6b62\u65f6\u95f4\uff0c \u53ef\u4fee\u6539 board_code = '881128' # \u6c7d\u8f66\u670d\u52a1\uff0c \u53ef\u4fee\u6539 bars = await Board . get_bars_in_range ( board_code , start , end ) bars [ - 3 :] # \u6253\u5370\u540e3\u6761\u6570\u636e # prints: rec . array ([ ( '2023-02-27T00:00:00' , 1117.748 , 1124.364 , 1108.741 , 1109.525 , 1.77208600e+08 , 1.13933095e+09 , 1. ), ( '2023-02-28T00:00:00' , 1112.246 , 1119.568 , 1109.827 , 1113.43 , 1.32828124e+08 , 6.65160380e+08 , 1. ), ( '2023-03-01T00:00:00' , 1122.233 , 1123.493 , 1116.62 , 1123.274 , 7.21718910e+07 , 3.71172850e+08 , 1. ) ], dtype = [( 'frame' , ' BarsArray : \"\"\"\u4ece\u6301\u4e45\u5316\u6570\u636e\u5e93\u4e2d\u83b7\u53d6\u4ecb\u4e8e[`start`, `end`]\u95f4\u7684\u884c\u60c5\u8bb0\u5f55 Examples: ```python start = datetime.date(2022, 9, 1) # \u8d77\u59cb\u65f6\u95f4\uff0c \u53ef\u4fee\u6539 end = datetime.date(2023, 3, 1) # \u622a\u6b62\u65f6\u95f4\uff0c \u53ef\u4fee\u6539 board_code = '881128' # \u6c7d\u8f66\u670d\u52a1\uff0c \u53ef\u4fee\u6539 bars = await Board.get_bars_in_range(board_code, start, end) bars[-3:] # \u6253\u5370\u540e3\u6761\u6570\u636e # prints: rec.array([ ('2023-02-27T00:00:00', 1117.748, 1124.364, 1108.741, 1109.525, 1.77208600e+08, 1.13933095e+09, 1.), ('2023-02-28T00:00:00', 1112.246, 1119.568, 1109.827, 1113.43 , 1.32828124e+08, 6.65160380e+08, 1.), ('2023-03-01T00:00:00', 1122.233, 1123.493, 1116.62 , 1123.274, 7.21718910e+07, 3.71172850e+08, 1.) 
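A sketch chaining `fuzzy_match_board_name` with `board_info_by_id`, based on the example output above (a list of `'<code> <name>'` strings); the import path and the string-splitting step are assumptions.

```python
from omicron.models.board import Board, BoardType  # assumed import path

async def find_board(pattern: str = "汽车"):
    matches = await Board.fuzzy_match_board_name(pattern, BoardType.INDUSTRY)
    if not matches:
        return None
    # per the example above, each match looks like '881125 汽车整车'
    code = matches[0].split()[0]
    return await Board.board_info_by_id(code)
```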
], dtype=[('frame', ' bool : \"\"\"\u5224\u65ad\u80a1\u4ef7\u662f\u5426\u76f8\u7b49 Args: x : \u4ef7\u683c1 y : \u4ef7\u683c2 Returns: \u5982\u679c\u76f8\u7b49\u5219\u8fd4\u56deTrue\uff0c\u5426\u5219\u8fd4\u56deFalse \"\"\" return abs ( math_round ( x , 2 ) - math_round ( y , 2 )) < 1e-2 np \u00b6 Extension function related to numpy array_math_round ( arr , digits ) \u00b6 \u5c06\u4e00\u7ef4\u6570\u7ec4arr\u7684\u6570\u636e\u8fdb\u884c\u56db\u820d\u4e94\u5165 numpy.around\u7684\u51fd\u6570\u5e76\u4e0d\u662f\u6570\u5b66\u4e0a\u7684\u56db\u820d\u4e94\u5165\uff0c\u5bf91.5\u548c2.5\u8fdb\u884cround\u7684\u7ed3\u679c\u90fd\u4f1a\u53d8\u62102\uff0c\u5728\u91d1\u878d\u9886\u57df\u8ba1\u7b97\u4e2d\uff0c\u6211\u4eec\u5fc5\u987b\u4f7f\u7528\u6570\u5b66\u610f\u4e49\u4e0a\u7684\u56db\u820d\u4e94\u5165\u3002 Parameters: Name Type Description Default arr ArrayLike \u8f93\u5165\u6570\u7ec4 required digits int required Returns: Type Description np.ndarray \u56db\u820d\u4e94\u5165\u540e\u7684\u4e00\u7ef4\u6570\u7ec4 Source code in omicron/extensions/np.py def array_math_round ( arr : Union [ float , ArrayLike ], digits : int ) -> np . ndarray : \"\"\"\u5c06\u4e00\u7ef4\u6570\u7ec4arr\u7684\u6570\u636e\u8fdb\u884c\u56db\u820d\u4e94\u5165 numpy.around\u7684\u51fd\u6570\u5e76\u4e0d\u662f\u6570\u5b66\u4e0a\u7684\u56db\u820d\u4e94\u5165\uff0c\u5bf91.5\u548c2.5\u8fdb\u884cround\u7684\u7ed3\u679c\u90fd\u4f1a\u53d8\u62102\uff0c\u5728\u91d1\u878d\u9886\u57df\u8ba1\u7b97\u4e2d\uff0c\u6211\u4eec\u5fc5\u987b\u4f7f\u7528\u6570\u5b66\u610f\u4e49\u4e0a\u7684\u56db\u820d\u4e94\u5165\u3002 Args: arr (ArrayLike): \u8f93\u5165\u6570\u7ec4 digits (int): Returns: np.ndarray: \u56db\u820d\u4e94\u5165\u540e\u7684\u4e00\u7ef4\u6570\u7ec4 \"\"\" # \u5982\u679c\u662f\u5355\u4e2a\u5143\u7d20\uff0c\u5219\u76f4\u63a5\u8fd4\u56de if isinstance ( arr , float ): return decimals . math_round ( arr , digits ) f = np . vectorize ( lambda x : decimals . math_round ( x , digits )) return f ( arr ) array_price_equal ( price1 , price2 ) \u00b6 \u5224\u65ad\u4e24\u4e2a\u4ef7\u683c\u6570\u7ec4\u662f\u5426\u76f8\u7b49 Parameters: Name Type Description Default price1 ArrayLike \u4ef7\u683c\u6570\u7ec4 required price2 ArrayLike \u4ef7\u683c\u6570\u7ec4 required Returns: Type Description np.ndarray \u5224\u65ad\u7ed3\u679c Source code in omicron/extensions/np.py def array_price_equal ( price1 : ArrayLike , price2 : ArrayLike ) -> np . ndarray : \"\"\"\u5224\u65ad\u4e24\u4e2a\u4ef7\u683c\u6570\u7ec4\u662f\u5426\u76f8\u7b49 Args: price1 (ArrayLike): \u4ef7\u683c\u6570\u7ec4 price2 (ArrayLike): \u4ef7\u683c\u6570\u7ec4 Returns: np.ndarray: \u5224\u65ad\u7ed3\u679c \"\"\" price1 = array_math_round ( price1 , 2 ) price2 = array_math_round ( price2 , 2 ) return abs ( price1 - price2 ) < 1e-2 bars_since ( condition , default = None ) \u00b6 Return the number of bars since condition sequence was last True , or if never, return default . 1 2 3 >>> condition = [True, True, False] >>> bars_since(condition) 1 Source code in omicron/extensions/np.py def bars_since ( condition : Sequence [ bool ], default = None ) -> int : \"\"\" Return the number of bars since `condition` sequence was last `True`, or if never, return `default`. 
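The rounding helpers above are small enough to demonstrate inline. This sketch assumes `math_round` and `price_equal` are importable from `omicron.extensions.decimals` (the path named in the "Source code" notes); the printed values follow from the definitions shown.

```python
from omicron.extensions.decimals import math_round, price_equal  # assumed path

print(round(2.5))          # 2   -- Python's banker's rounding
print(math_round(2.5, 0))  # 3.0 -- mathematical rounding

print(price_equal(10.0, 10.004))  # True:  both round to 10.00
print(price_equal(10.0, 10.02))   # False: differ by 0.02 at two decimals
```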
>>> condition = [True, True, False] >>> bars_since(condition) 1 \"\"\" return next ( compress ( range ( len ( condition )), reversed ( condition )), default ) bin_cut ( arr , n ) \u00b6 \u5c06\u6570\u7ec4arr\u5207\u5206\u6210n\u4efd todo: use padding + reshape to boost performance Parameters: Name Type Description Default arr [type] [description] required n [type] [description] required Returns: Type Description [type] [description] Source code in omicron/extensions/np.py def bin_cut ( arr : list , n : int ): \"\"\"\u5c06\u6570\u7ec4arr\u5207\u5206\u6210n\u4efd todo: use padding + reshape to boost performance Args: arr ([type]): [description] n ([type]): [description] Returns: [type]: [description] \"\"\" result = [[] for i in range ( n )] for i , e in enumerate ( arr ): result [ i % n ] . append ( e ) return [ e for e in result if len ( e )] count_between ( arr , start , end ) \u00b6 \u8ba1\u7b97\u6570\u7ec4\u4e2d\uff0c start \u5143\u7d20\u4e0e end \u5143\u7d20\u4e4b\u95f4\u5171\u6709\u591a\u5c11\u4e2a\u5143\u7d20 \u8981\u6c42arr\u5fc5\u987b\u662f\u5df2\u6392\u5e8f\u3002\u8ba1\u7b97\u7ed3\u679c\u4f1a\u5305\u542b\u533a\u95f4\u8fb9\u754c\u70b9\u3002 Examples: >>> arr = [ 20050104 , 20050105 , 20050106 , 20050107 , 20050110 , 20050111 ] >>> count_between ( arr , 20050104 , 20050111 ) 6 >>> count_between ( arr , 20050104 , 20050109 ) 4 Source code in omicron/extensions/np.py def count_between ( arr , start , end ): \"\"\"\u8ba1\u7b97\u6570\u7ec4\u4e2d\uff0c`start`\u5143\u7d20\u4e0e`end`\u5143\u7d20\u4e4b\u95f4\u5171\u6709\u591a\u5c11\u4e2a\u5143\u7d20 \u8981\u6c42arr\u5fc5\u987b\u662f\u5df2\u6392\u5e8f\u3002\u8ba1\u7b97\u7ed3\u679c\u4f1a\u5305\u542b\u533a\u95f4\u8fb9\u754c\u70b9\u3002 Examples: >>> arr = [20050104, 20050105, 20050106, 20050107, 20050110, 20050111] >>> count_between(arr, 20050104, 20050111) 6 >>> count_between(arr, 20050104, 20050109) 4 \"\"\" pos_start = np . searchsorted ( arr , start , side = \"right\" ) pos_end = np . searchsorted ( arr , end , side = \"right\" ) counter = pos_end - pos_start + 1 if start < arr [ 0 ]: counter -= 1 if end > arr [ - 1 ]: counter -= 1 return counter dataframe_to_structured_array ( df , dtypes = None ) \u00b6 convert dataframe (with all columns, and index possibly) to numpy structured arrays len(dtypes) should be either equal to len(df.columns) or len(df.columns) + 1 . In the later case, it implies to include df.index into converted array. Parameters: Name Type Description Default df DataFrame the one needs to be converted required dtypes List[Tuple] Defaults to None. If it's None , then dtypes of df is used, in such case, the index of df will not be converted. None Returns: Type Description ArrayLike [description] Source code in omicron/extensions/np.py def dataframe_to_structured_array ( df : DataFrame , dtypes : List [ Tuple ] = None ) -> ArrayLike : \"\"\"convert dataframe (with all columns, and index possibly) to numpy structured arrays `len(dtypes)` should be either equal to `len(df.columns)` or `len(df.columns) + 1`. In the later case, it implies to include `df.index` into converted array. Args: df: the one needs to be converted dtypes: Defaults to None. If it's `None`, then dtypes of `df` is used, in such case, the `index` of `df` will not be converted. Returns: ArrayLike: [description] \"\"\" v = df if dtypes is not None : dtypes_in_dict = { key : value for key , value in dtypes } col_len = len ( df . columns ) if len ( dtypes ) == col_len + 1 : v = df . reset_index () rename_index_to = set ( dtypes_in_dict . keys ()) . 
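A sketch of `bars_since` and `count_between`, mirroring the doctests above; the import path `omicron.extensions.np` is assumed from the "Source code" notes.

```python
from omicron.extensions.np import bars_since, count_between  # assumed path

# bars_since: how many bars ago the condition was last True (0 = latest bar)
print(bars_since([True, True, False]))        # 1
print(bars_since([False, False, False], -1))  # -1 (never True -> default)

# count_between: elements between two values in a sorted array, bounds included
frames = [20050104, 20050105, 20050106, 20050107, 20050110, 20050111]
print(count_between(frames, 20050104, 20050111))  # 6
print(count_between(frames, 20050104, 20050109))  # 4
```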
difference ( set ( df . columns )) v . rename ( columns = { \"index\" : list ( rename_index_to )[ 0 ]}, inplace = True ) elif col_len != len ( dtypes ): raise ValueError ( f \"length of dtypes should be either { col_len } or { col_len + 1 } , is { len ( dtypes ) } \" ) # re-arrange order of dtypes, in order to align with df.columns dtypes = [] for name in v . columns : dtypes . append (( name , dtypes_in_dict [ name ])) else : dtypes = df . dtypes return np . array ( np . rec . fromrecords ( v . values ), dtype = dtypes ) dict_to_numpy_array ( d , dtype ) \u00b6 convert dictionary to numpy array Examples: d = {\"aaron\": 5, \"jack\": 6} dtype = [(\"name\", \"S8\"), (\"score\", \" np . array : \"\"\"convert dictionary to numpy array Examples: >>> d = {\"aaron\": 5, \"jack\": 6} >>> dtype = [(\"name\", \"S8\"), (\"score\", \">> dict_to_numpy_array(d, dtype) array([(b'aaron', 5), (b'jack', 6)], dtype=[('name', 'S8'), ('score', '>> arr = np . arange ( 6 , dtype = np . float32 ) >>> arr [ 3 : 5 ] = np . NaN >>> fill_nan ( arr ) ... array ([ 0. , 1. , 2. , 2. , 2. , 5. ], dtype = float32 ) >>> arr = np . arange ( 6 , dtype = np . float32 ) >>> arr [ 0 : 2 ] = np . nan >>> fill_nan ( arr ) ... array ([ 2. , 2. , 2. , 3. , 4. , 5. ], dtype = float32 ) Parameters: Name Type Description Default ts np.array [description] required Source code in omicron/extensions/np.py def fill_nan ( ts : np . ndarray ): \"\"\"\u5c06ts\u4e2d\u7684NaN\u66ff\u6362\u4e3a\u5176\u524d\u503c \u5982\u679cts\u8d77\u5934\u7684\u5143\u7d20\u4e3aNaN\uff0c\u5219\u7528\u7b2c\u4e00\u4e2a\u975eNaN\u5143\u7d20\u66ff\u6362\u3002 \u5982\u679c\u6240\u6709\u5143\u7d20\u90fd\u4e3aNaN\uff0c\u5219\u65e0\u6cd5\u66ff\u6362\u3002 Example: >>> arr = np.arange(6, dtype=np.float32) >>> arr[3:5] = np.NaN >>> fill_nan(arr) ... # doctest: +NORMALIZE_WHITESPACE array([0., 1., 2., 2., 2., 5.], dtype=float32) >>> arr = np.arange(6, dtype=np.float32) >>> arr[0:2] = np.nan >>> fill_nan(arr) ... # doctest: +NORMALIZE_WHITESPACE array([2., 2., 2., 3., 4., 5.], dtype=float32) Args: ts (np.array): [description] \"\"\" if np . all ( np . isnan ( ts )): raise ValueError ( \"all of ts are NaN\" ) if ts [ 0 ] is None or math . isnan ( ts [ 0 ]): idx = np . argwhere ( ~ np . isnan ( ts ))[ 0 ] ts [ 0 ] = ts [ idx ] mask = np . isnan ( ts ) idx = np . where ( ~ mask , np . arange ( mask . size ), 0 ) np . maximum . accumulate ( idx , out = idx ) return ts [ idx ] find_runs ( x ) \u00b6 Find runs of consecutive items in an array. Parameters: Name Type Description Default x ArrayLike the sequence to find runs in required Returns: Type Description Tuple[np.ndarray, np.ndarray, np.ndarray] A tuple of unique values, start indices, and length of runs Source code in omicron/extensions/np.py def find_runs ( x : ArrayLike ) -> Tuple [ np . ndarray , np . ndarray , np . ndarray ]: \"\"\"Find runs of consecutive items in an array. Args: x: the sequence to find runs in Returns: A tuple of unique values, start indices, and length of runs \"\"\" # ensure array x = np . asanyarray ( x ) if x . ndim != 1 : raise ValueError ( \"only 1D array supported\" ) n = x . shape [ 0 ] # handle empty array if n == 0 : return np . array ([]), np . array ([]), np . array ([]) else : # find run starts loc_run_start = np . empty ( n , dtype = bool ) loc_run_start [ 0 ] = True np . not_equal ( x [: - 1 ], x [ 1 :], out = loc_run_start [ 1 :]) run_starts = np . nonzero ( loc_run_start )[ 0 ] # find run values run_values = x [ loc_run_start ] # find run lengths run_lengths = np . diff ( np . 
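A sketch of `dict_to_numpy_array` and `fill_nan` (assumed import path). The `'<i4'` dtype string is written out here as an assumption, since the `<`-prefixed dtype annotations are truncated in the rendered docstrings above.

```python
import numpy as np

from omicron.extensions.np import dict_to_numpy_array, fill_nan  # assumed path

d = {"aaron": 5, "jack": 6}
dtype = [("name", "S8"), ("score", "<i4")]
print(dict_to_numpy_array(d, dtype))
# array([(b'aaron', 5), (b'jack', 6)], dtype=[('name', 'S8'), ('score', '<i4')])

arr = np.arange(6, dtype=np.float32)
arr[3:5] = np.nan
print(fill_nan(arr))  # [0. 1. 2. 2. 2. 5.] -- NaNs forward-filled
```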
append ( run_starts , n )) return run_values , run_starts , run_lengths floor ( arr , item ) \u00b6 \u5728\u6570\u636earr\u4e2d\uff0c\u627e\u5230\u5c0f\u4e8e\u7b49\u4e8eitem\u7684\u90a3\u4e00\u4e2a\u503c\u3002\u5982\u679citem\u5c0f\u4e8e\u6240\u6709arr\u5143\u7d20\u7684\u503c\uff0c\u8fd4\u56dearr[0];\u5982\u679citem \u5927\u4e8e\u6240\u6709arr\u5143\u7d20\u7684\u503c\uff0c\u8fd4\u56dearr[-1] \u4e0e minute_frames_floor \u4e0d\u540c\u7684\u662f\uff0c\u672c\u51fd\u6570\u4e0d\u505a\u56de\u7ed5\u4e0e\u8fdb\u4f4d. Examples: >>> a = [ 3 , 6 , 9 ] >>> floor ( a , - 1 ) 3 >>> floor ( a , 9 ) 9 >>> floor ( a , 10 ) 9 >>> floor ( a , 4 ) 3 >>> floor ( a , 10 ) 9 Parameters: Name Type Description Default arr required item required Source code in omicron/extensions/np.py def floor ( arr , item ): \"\"\" \u5728\u6570\u636earr\u4e2d\uff0c\u627e\u5230\u5c0f\u4e8e\u7b49\u4e8eitem\u7684\u90a3\u4e00\u4e2a\u503c\u3002\u5982\u679citem\u5c0f\u4e8e\u6240\u6709arr\u5143\u7d20\u7684\u503c\uff0c\u8fd4\u56dearr[0];\u5982\u679citem \u5927\u4e8e\u6240\u6709arr\u5143\u7d20\u7684\u503c\uff0c\u8fd4\u56dearr[-1] \u4e0e`minute_frames_floor`\u4e0d\u540c\u7684\u662f\uff0c\u672c\u51fd\u6570\u4e0d\u505a\u56de\u7ed5\u4e0e\u8fdb\u4f4d. Examples: >>> a = [3, 6, 9] >>> floor(a, -1) 3 >>> floor(a, 9) 9 >>> floor(a, 10) 9 >>> floor(a, 4) 3 >>> floor(a,10) 9 Args: arr: item: Returns: \"\"\" if item < arr [ 0 ]: return arr [ 0 ] index = np . searchsorted ( arr , item , side = \"right\" ) return arr [ index - 1 ] join_by_left ( key , r1 , r2 , mask = True ) \u00b6 \u5de6\u8fde\u63a5 r1 , r2 by key \u5982\u679c r1 \u4e2d\u5b58\u5728 r2 \u4e2d\u6ca1\u6709\u7684\u884c\uff0c\u5219\u8be5\u884c\u5bf9\u5e94\u7684 r2 \u4e2d\u7684\u90a3\u4e9b\u5b57\u6bb5\u5c06\u88abmask\uff0c\u6216\u8005\u586b\u5145\u968f\u673a\u6570\u3002 same as numpy.lib.recfunctions.join_by(key, r1, r2, jointype='leftouter'), but allows r1 have duplicate keys Reference: stackoverflow Examples: >>> # to join the following >>> # [[ 1, 2], >>> # [ 1, 3], x [[1, 5], >>> # [ 2, 3]] [4, 7]] >>> # only first two rows in left will be joined >>> r1 = np . array ([( 1 , 2 ), ( 1 , 3 ), ( 2 , 3 )], dtype = [( 'seq' , 'i4' ), ( 'score' , 'i4' )]) >>> r2 = np . array ([( 1 , 5 ), ( 4 , 7 )], dtype = [( 'seq' , 'i4' ), ( 'age' , 'i4' )]) >>> joined = join_by_left ( 'seq' , r1 , r2 ) >>> print ( joined ) [( 1 , 2 , 5 ) ( 1 , 3 , 5 ) ( 2 , 3 , -- )] >>> print ( joined . dtype ) ( numpy . record , [( 'seq' , '>> joined [ 2 ][ 2 ] masked >>> joined . 
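A sketch of `find_runs` and `floor` (assumed import path); the expected outputs follow directly from the source shown above.

```python
import numpy as np

from omicron.extensions.np import find_runs, floor  # assumed path

values, starts, lengths = find_runs(np.array([1, 1, 2, 2, 2, 3]))
print(values)   # [1 2 3]
print(starts)   # [0 2 5]
print(lengths)  # [2 3 1]

frames = np.array([20050104, 20050107, 20050110])
print(floor(frames, 20050108))  # 20050107 -- largest element <= 20050108
```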
tolist ()[ 2 ][ 2 ] == None True Parameters: Name Type Description Default key join\u5173\u952e\u5b57 required r1 \u6570\u636e\u96c61 required r2 \u6570\u636e\u96c62 required Returns: Type Description a numpy array Source code in omicron/extensions/np.py def join_by_left ( key , r1 , r2 , mask = True ): \"\"\"\u5de6\u8fde\u63a5 `r1`, `r2` by `key` \u5982\u679c`r1`\u4e2d\u5b58\u5728`r2`\u4e2d\u6ca1\u6709\u7684\u884c\uff0c\u5219\u8be5\u884c\u5bf9\u5e94\u7684`r2`\u4e2d\u7684\u90a3\u4e9b\u5b57\u6bb5\u5c06\u88abmask\uff0c\u6216\u8005\u586b\u5145\u968f\u673a\u6570\u3002 same as numpy.lib.recfunctions.join_by(key, r1, r2, jointype='leftouter'), but allows r1 have duplicate keys [Reference: stackoverflow](https://stackoverflow.com/a/53261882/13395693) Examples: >>> # to join the following >>> # [[ 1, 2], >>> # [ 1, 3], x [[1, 5], >>> # [ 2, 3]] [4, 7]] >>> # only first two rows in left will be joined >>> r1 = np.array([(1, 2), (1,3), (2,3)], dtype=[('seq', 'i4'), ('score', 'i4')]) >>> r2 = np.array([(1, 5), (4,7)], dtype=[('seq', 'i4'), ('age', 'i4')]) >>> joined = join_by_left('seq', r1, r2) >>> print(joined) [(1, 2, 5) (1, 3, 5) (2, 3, --)] >>> print(joined.dtype) (numpy.record, [('seq', '>> joined[2][2] masked >>> joined.tolist()[2][2] == None True Args: key : join\u5173\u952e\u5b57 r1 : \u6570\u636e\u96c61 r2 : \u6570\u636e\u96c62 Returns: a numpy array \"\"\" # figure out the dtype of the result array descr1 = r1 . dtype . descr descr2 = [ d for d in r2 . dtype . descr if d [ 0 ] not in r1 . dtype . names ] descrm = descr1 + descr2 # figure out the fields we'll need from each array f1 = [ d [ 0 ] for d in descr1 ] f2 = [ d [ 0 ] for d in descr2 ] # cache the number of columns in f1 ncol1 = len ( f1 ) # get a dict of the rows of r2 grouped by key rows2 = {} for row2 in r2 : rows2 . setdefault ( row2 [ key ], []) . append ( row2 ) # figure out how many rows will be in the result nrowm = 0 for k1 in r1 [ key ]: if k1 in rows2 : nrowm += len ( rows2 [ k1 ]) else : nrowm += 1 # allocate the return array # ret = np.full((nrowm, ), fill, dtype=descrm) _ret = np . recarray ( nrowm , dtype = descrm ) if mask : ret = np . ma . array ( _ret , mask = True ) else : ret = _ret # merge the data into the return array i = 0 for row1 in r1 : if row1 [ key ] in rows2 : for row2 in rows2 [ row1 [ key ]]: ret [ i ] = tuple ( row1 [ f1 ]) + tuple ( row2 [ f2 ]) i += 1 else : for j in range ( ncol1 ): ret [ i ][ j ] = row1 [ j ] i += 1 return ret numpy_append_fields ( base , names , data , dtypes ) \u00b6 \u7ed9\u73b0\u6709\u7684\u6570\u7ec4 base \u589e\u52a0\u65b0\u7684\u5b57\u6bb5 \u5b9e\u73b0\u4e86 numpy.lib.recfunctions.rec_append_fields \u7684\u529f\u80fd\u3002\u63d0\u4f9b\u8fd9\u4e2a\u529f\u80fd\uff0c\u662f\u56e0\u4e3a rec_append_fields \u4e0d\u80fd\u5904\u7406 data \u5143\u7d20\u7684\u7c7b\u578b\u4e3aObject\u7684\u60c5\u51b5\u3002 \u65b0\u589e\u7684\u6570\u636e\u5217\u5c06\u987a\u5e8f\u6392\u5217\u5728\u5176\u5b83\u5217\u7684\u53f3\u8fb9\u3002 Examples: >>> # \u65b0\u589e\u5355\u4e2a\u5b57\u6bb5 >>> import numpy >>> old = np . array ([ i for i in range ( 3 )], dtype = [( 'col1' , '>> new_list = [ 2 * i for i in range ( 3 )] >>> res = numpy_append_fields ( old , 'new_col' , new_list , [( 'new_col' , '>> print ( res ) ... [( 0. , 0. ) ( 1. , 2. ) ( 2. , 4. )] >>> # \u65b0\u589e\u591a\u4e2a\u5b57\u6bb5 >>> data = [ res [ 'col1' ] . tolist (), res [ 'new_col' ] . tolist ()] >>> print ( numpy_append_fields ( old , ( 'col3' , 'col4' ), data , [( 'col3' , ' np . 
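A sketch of `join_by_left`, mirroring the docstring example (assumed import path): duplicate keys on the left are kept, and left rows without a match in `r2` get masked fields.

```python
import numpy as np

from omicron.extensions.np import join_by_left  # assumed path

r1 = np.array([(1, 2), (1, 3), (2, 3)], dtype=[("seq", "i4"), ("score", "i4")])
r2 = np.array([(1, 5), (4, 7)], dtype=[("seq", "i4"), ("age", "i4")])

joined = join_by_left("seq", r1, r2)
print(joined)                         # [(1, 2, 5) (1, 3, 5) (2, 3, --)]
print(joined.tolist()[2][2] is None)  # True -- the masked cell becomes None
```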
ndarray : \"\"\"\u7ed9\u73b0\u6709\u7684\u6570\u7ec4`base`\u589e\u52a0\u65b0\u7684\u5b57\u6bb5 \u5b9e\u73b0\u4e86`numpy.lib.recfunctions.rec_append_fields`\u7684\u529f\u80fd\u3002\u63d0\u4f9b\u8fd9\u4e2a\u529f\u80fd\uff0c\u662f\u56e0\u4e3a`rec_append_fields`\u4e0d\u80fd\u5904\u7406`data`\u5143\u7d20\u7684\u7c7b\u578b\u4e3aObject\u7684\u60c5\u51b5\u3002 \u65b0\u589e\u7684\u6570\u636e\u5217\u5c06\u987a\u5e8f\u6392\u5217\u5728\u5176\u5b83\u5217\u7684\u53f3\u8fb9\u3002 Example: >>> # \u65b0\u589e\u5355\u4e2a\u5b57\u6bb5 >>> import numpy >>> old = np.array([i for i in range(3)], dtype=[('col1', '>> new_list = [2 * i for i in range(3)] >>> res = numpy_append_fields(old, 'new_col', new_list, [('new_col', '>> print(res) ... # doctest: +NORMALIZE_WHITESPACE [(0., 0.) (1., 2.) (2., 4.)] >>> # \u65b0\u589e\u591a\u4e2a\u5b57\u6bb5 >>> data = [res['col1'].tolist(), res['new_col'].tolist()] >>> print(numpy_append_fields(old, ('col3', 'col4'), data, [('col3', ' np . ndarray : \"\"\"\u4ece`ts`\u4e2d\u53bb\u9664NaN Args: ts (np.array): [description] Returns: np.array: [description] \"\"\" return ts [ ~ np . isnan ( ts . astype ( float ))] replace_zero ( ts , replacement = None ) \u00b6 \u5c06ts\u4e2d\u76840\u66ff\u6362\u4e3a\u524d\u503c, \u5904\u7406volume\u6570\u636e\u65f6\u5e38\u7528\u7528\u5230 \u5982\u679c\u63d0\u4f9b\u4e86replacement, \u5219\u66ff\u6362\u4e3areplacement Source code in omicron/extensions/np.py def replace_zero ( ts : np . ndarray , replacement = None ) -> np . ndarray : \"\"\"\u5c06ts\u4e2d\u76840\u66ff\u6362\u4e3a\u524d\u503c, \u5904\u7406volume\u6570\u636e\u65f6\u5e38\u7528\u7528\u5230 \u5982\u679c\u63d0\u4f9b\u4e86replacement, \u5219\u66ff\u6362\u4e3areplacement \"\"\" if replacement is not None : return np . where ( ts == 0 , replacement , ts ) if np . all ( ts == 0 ): raise ValueError ( \"all of ts are 0\" ) if ts [ 0 ] == 0 : idx = np . argwhere ( ts != 0 )[ 0 ] ts [ 0 ] = ts [ idx ] mask = ts == 0 idx = np . where ( ~ mask , np . arange ( mask . size ), 0 ) np . maximum . accumulate ( idx , out = idx ) return ts [ idx ] rolling ( x , win , func ) \u00b6 \u5bf9\u5e8f\u5217 x \u8fdb\u884c\u7a97\u53e3\u6ed1\u52a8\u8ba1\u7b97\u3002 \u5982\u679c func \u8981\u5b9e\u73b0\u7684\u529f\u80fd\u662fargmax, argmin, max, mean, median, min, rank, std, sum, var\u7b49\uff0cmove_argmax\uff0c\u8bf7\u4f7f\u7528bottleneck\u4e2d\u7684move_argmin, move_max, move_mean, move_median, move_min move_rank, move_std, move_sum, move_var\u3002\u8fd9\u4e9b\u51fd\u6570\u7684\u6027\u80fd\u66f4\u597d\u3002 Parameters: Name Type Description Default x [type] [description] required win [type] [description] required func [type] [description] required Returns: Type Description [type] [description] Source code in omicron/extensions/np.py def rolling ( x , win , func ): \"\"\"\u5bf9\u5e8f\u5217`x`\u8fdb\u884c\u7a97\u53e3\u6ed1\u52a8\u8ba1\u7b97\u3002 \u5982\u679c`func`\u8981\u5b9e\u73b0\u7684\u529f\u80fd\u662fargmax, argmin, max, mean, median, min, rank, std, sum, var\u7b49\uff0cmove_argmax\uff0c\u8bf7\u4f7f\u7528bottleneck\u4e2d\u7684move_argmin, move_max, move_mean, move_median, move_min move_rank, move_std, move_sum, move_var\u3002\u8fd9\u4e9b\u51fd\u6570\u7684\u6027\u80fd\u66f4\u597d\u3002 Args: x ([type]): [description] win ([type]): [description] func ([type]): [description] Returns: [type]: [description] \"\"\" results = [] for subarray in sliding_window_view ( x , window_shape = win ): results . append ( func ( subarray )) return np . 
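A sketch of `replace_zero` (assumed import path), e.g. for cleaning a volume series; copies are passed in because the forward-fill branch writes to `ts[0]` in place.

```python
import numpy as np

from omicron.extensions.np import replace_zero  # assumed path

volume = np.array([0.0, 3.0, 0.0, 5.0])
print(replace_zero(volume.copy()))                 # [3. 3. 3. 5.] -- forward-filled
print(replace_zero(volume.copy(), replacement=1))  # [1. 3. 1. 5.]
```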
array ( results ) shift ( arr , start , offset ) \u00b6 \u5728numpy\u6570\u7ec4arr\u4e2d\uff0c\u627e\u5230start(\u6216\u8005\u6700\u63a5\u8fd1\u7684\u4e00\u4e2a\uff09\uff0c\u53d6offset\u5bf9\u5e94\u7684\u5143\u7d20\u3002 \u8981\u6c42 arr \u5df2\u6392\u5e8f\u3002 offset \u4e3a\u6b63\uff0c\u8868\u660e\u5411\u540e\u79fb\u4f4d\uff1b offset \u4e3a\u8d1f\uff0c\u8868\u660e\u5411\u524d\u79fb\u4f4d Examples: >>> arr = [ 20050104 , 20050105 , 20050106 , 20050107 , 20050110 , 20050111 ] >>> shift ( arr , 20050104 , 1 ) 20050105 >>> shift ( arr , 20050105 , - 1 ) 20050104 >>> # \u8d77\u59cb\u70b9\u5df2\u53f3\u8d8a\u754c\uff0c\u4e14\u5411\u53f3shift\uff0c\u8fd4\u56de\u8d77\u59cb\u70b9 >>> shift ( arr , 20050120 , 1 ) 20050120 Parameters: Name Type Description Default arr \u5df2\u6392\u5e8f\u7684\u6570\u7ec4 required start numpy\u53ef\u63a5\u53d7\u7684\u6570\u636e\u7c7b\u578b required offset int [description] required Returns: Type Description \u79fb\u4f4d\u540e\u5f97\u5230\u7684\u5143\u7d20\u503c Source code in omicron/extensions/np.py def shift ( arr , start , offset ): \"\"\"\u5728numpy\u6570\u7ec4arr\u4e2d\uff0c\u627e\u5230start(\u6216\u8005\u6700\u63a5\u8fd1\u7684\u4e00\u4e2a\uff09\uff0c\u53d6offset\u5bf9\u5e94\u7684\u5143\u7d20\u3002 \u8981\u6c42`arr`\u5df2\u6392\u5e8f\u3002`offset`\u4e3a\u6b63\uff0c\u8868\u660e\u5411\u540e\u79fb\u4f4d\uff1b`offset`\u4e3a\u8d1f\uff0c\u8868\u660e\u5411\u524d\u79fb\u4f4d Examples: >>> arr = [20050104, 20050105, 20050106, 20050107, 20050110, 20050111] >>> shift(arr, 20050104, 1) 20050105 >>> shift(arr, 20050105, -1) 20050104 >>> # \u8d77\u59cb\u70b9\u5df2\u53f3\u8d8a\u754c\uff0c\u4e14\u5411\u53f3shift\uff0c\u8fd4\u56de\u8d77\u59cb\u70b9 >>> shift(arr, 20050120, 1) 20050120 Args: arr : \u5df2\u6392\u5e8f\u7684\u6570\u7ec4 start : numpy\u53ef\u63a5\u53d7\u7684\u6570\u636e\u7c7b\u578b offset (int): [description] Returns: \u79fb\u4f4d\u540e\u5f97\u5230\u7684\u5143\u7d20\u503c \"\"\" pos = np . searchsorted ( arr , start , side = \"right\" ) if pos + offset - 1 >= len ( arr ): return start else : return arr [ pos + offset - 1 ] smallest_n_argpos ( ts , n ) \u00b6 get smallest n (min->max) elements and return argpos which its value ordered in ascent Examples: >>> smallest_n_argpos ([ np . nan , 4 , 3 , 9 , 8 , 5 , 2 , 1 , 0 , 6 , 7 ], 2 ) array ([ 8 , 7 ]) Parameters: Name Type Description Default ts np.array \u8f93\u5165\u7684\u6570\u7ec4 required n int \u53d6\u6700\u5c0f\u7684n\u4e2a\u5143\u7d20 required Returns: Type Description np.array [description] Source code in omicron/extensions/np.py def smallest_n_argpos ( ts : np . array , n : int ) -> np . array : \"\"\"get smallest n (min->max) elements and return argpos which its value ordered in ascent Example: >>> smallest_n_argpos([np.nan, 4, 3, 9, 8, 5, 2, 1, 0, 6, 7], 2) array([8, 7]) Args: ts (np.array): \u8f93\u5165\u7684\u6570\u7ec4 n (int): \u53d6\u6700\u5c0f\u7684n\u4e2a\u5143\u7d20 Returns: np.array: [description] \"\"\" return np . 
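A sketch of `rolling` and `shift` (assumed import path), mirroring the shift doctests above.

```python
import numpy as np

from omicron.extensions.np import rolling, shift  # assumed path

print(rolling(np.arange(5), 3, np.mean))  # [1. 2. 3.]

frames = [20050104, 20050105, 20050106, 20050107, 20050110, 20050111]
print(shift(frames, 20050104, 1))   # 20050105
print(shift(frames, 20050105, -1))  # 20050104
print(shift(frames, 20050120, 1))   # 20050120 -- start already past the right edge
```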
argsort ( ts )[: n ] to_pydatetime ( tm ) \u00b6 \u5c06numpy.datetime64\u5bf9\u8c61\u8f6c\u6362\u6210\u4e3apython\u7684datetime\u5bf9\u8c61 numpy.ndarray.item()\u65b9\u6cd5\u53ef\u7528\u4ee5\u5c06\u4efb\u4f55numpy\u5bf9\u8c61\u8f6c\u6362\u6210python\u5bf9\u8c61\uff0c\u63a8\u8350\u5728\u4efb\u4f55\u9002\u7528\u7684\u5730\u65b9\u4f7f\u7528.item()\u65b9\u6cd5\uff0c\u800c\u4e0d\u662f\u672c\u65b9\u6cd5\u3002\u793a\u4f8b: 1 2 3 4 arr = np.array(['2022-09-08', '2022-09-09'], dtype='datetime64[s]') arr.item(0) # output is datetime.datetime(2022, 9, 8, 0, 0) arr[1].item() # output is datetime.datetime(2022, 9, 9, 0, 0) Parameters: Name Type Description Default tm the input numpy datetime object required Returns: Type Description datetime.datetime python datetime object .. deprecated:: 2.0.0 use tm.item() instead Source code in omicron/extensions/np.py @deprecated ( \"2.0.0\" , details = \"use `tm.item()` instead\" ) def to_pydatetime ( tm : np . datetime64 ) -> datetime . datetime : \"\"\"\u5c06numpy.datetime64\u5bf9\u8c61\u8f6c\u6362\u6210\u4e3apython\u7684datetime\u5bf9\u8c61 numpy.ndarray.item()\u65b9\u6cd5\u53ef\u7528\u4ee5\u5c06\u4efb\u4f55numpy\u5bf9\u8c61\u8f6c\u6362\u6210python\u5bf9\u8c61\uff0c\u63a8\u8350\u5728\u4efb\u4f55\u9002\u7528\u7684\u5730\u65b9\u4f7f\u7528.item()\u65b9\u6cd5\uff0c\u800c\u4e0d\u662f\u672c\u65b9\u6cd5\u3002\u793a\u4f8b: ``` arr = np.array(['2022-09-08', '2022-09-09'], dtype='datetime64[s]') arr.item(0) # output is datetime.datetime(2022, 9, 8, 0, 0) arr[1].item() # output is datetime.datetime(2022, 9, 9, 0, 0) ``` Args: tm : the input numpy datetime object Returns: python datetime object \"\"\" unix_epoch = np . datetime64 ( 0 , \"s\" ) one_second = np . timedelta64 ( 1 , \"s\" ) seconds_since_epoch = ( tm - unix_epoch ) / one_second return datetime . datetime . utcfromtimestamp ( seconds_since_epoch ) top_n_argpos ( ts , n ) \u00b6 get top n (max->min) elements and return argpos which its value ordered in descent Examples: >>> top_n_argpos ([ np . nan , 4 , 3 , 9 , 8 , 5 , 2 , 1 , 0 , 6 , 7 ], 2 ) array ([ 3 , 4 ]) Parameters: Name Type Description Default ts np.array [description] required n int [description] required Returns: Type Description np.array [description] Source code in omicron/extensions/np.py def top_n_argpos ( ts : np . array , n : int ) -> np . array : \"\"\"get top n (max->min) elements and return argpos which its value ordered in descent Example: >>> top_n_argpos([np.nan, 4, 3, 9, 8, 5, 2, 1, 0, 6, 7], 2) array([3, 4]) Args: ts (np.array): [description] n (int): [description] Returns: np.array: [description] \"\"\" ts_ = np . copy ( ts ) ts_ [ np . isnan ( ts_ )] = - np . inf return np . 
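A sketch of `smallest_n_argpos` and `top_n_argpos` (assumed import path), using the doctest data above; NaNs sort last (smallest) or are treated as -inf (top), so they do not show up for these inputs.

```python
import numpy as np

from omicron.extensions.np import smallest_n_argpos, top_n_argpos  # assumed path

ts = np.array([np.nan, 4, 3, 9, 8, 5, 2, 1, 0, 6, 7])
print(smallest_n_argpos(ts, 2))  # [8 7] -- positions of the values 0 and 1
print(top_n_argpos(ts, 2))       # [3 4] -- positions of the values 9 and 8
```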
argsort ( ts_ )[ - n :][:: - 1 ]","title":"Extensions"},{"location":"api/extensions/#omicron.extensions.decimals","text":"","title":"decimals"},{"location":"api/extensions/#omicron.extensions.decimals.math_round","text":"\u7531\u4e8e\u6d6e\u70b9\u6570\u7684\u8868\u793a\u95ee\u9898\uff0c\u5f88\u591a\u8bed\u8a00\u7684round\u51fd\u6570\u4e0e\u6570\u5b66\u4e0a\u7684round\u51fd\u6570\u4e0d\u4e00\u81f4\u3002\u4e0b\u9762\u7684\u51fd\u6570\u7ed3\u679c\u4e0e\u6570\u5b66\u4e0a\u7684\u4e00\u81f4\u3002 Parameters: Name Type Description Default x float \u8981\u8fdb\u884c\u56db\u820d\u4e94\u5165\u7684\u6570\u5b57 required digits int \u5c0f\u6570\u70b9\u540e\u4fdd\u7559\u7684\u4f4d\u6570 required Source code in omicron/extensions/decimals.py def math_round ( x : float , digits : int ): \"\"\"\u7531\u4e8e\u6d6e\u70b9\u6570\u7684\u8868\u793a\u95ee\u9898\uff0c\u5f88\u591a\u8bed\u8a00\u7684round\u51fd\u6570\u4e0e\u6570\u5b66\u4e0a\u7684round\u51fd\u6570\u4e0d\u4e00\u81f4\u3002\u4e0b\u9762\u7684\u51fd\u6570\u7ed3\u679c\u4e0e\u6570\u5b66\u4e0a\u7684\u4e00\u81f4\u3002 Args: x: \u8981\u8fdb\u884c\u56db\u820d\u4e94\u5165\u7684\u6570\u5b57 digits: \u5c0f\u6570\u70b9\u540e\u4fdd\u7559\u7684\u4f4d\u6570 \"\"\" return int ( x * ( 10 ** digits ) + copysign ( 0.5 , x )) / ( 10 ** digits )","title":"math_round()"},{"location":"api/extensions/#omicron.extensions.decimals.price_equal","text":"\u5224\u65ad\u80a1\u4ef7\u662f\u5426\u76f8\u7b49 Parameters: Name Type Description Default x \u4ef7\u683c1 required y \u4ef7\u683c2 required Returns: Type Description bool \u5982\u679c\u76f8\u7b49\u5219\u8fd4\u56deTrue\uff0c\u5426\u5219\u8fd4\u56deFalse Source code in omicron/extensions/decimals.py def price_equal ( x : float , y : float ) -> bool : \"\"\"\u5224\u65ad\u80a1\u4ef7\u662f\u5426\u76f8\u7b49 Args: x : \u4ef7\u683c1 y : \u4ef7\u683c2 Returns: \u5982\u679c\u76f8\u7b49\u5219\u8fd4\u56deTrue\uff0c\u5426\u5219\u8fd4\u56deFalse \"\"\" return abs ( math_round ( x , 2 ) - math_round ( y , 2 )) < 1e-2","title":"price_equal()"},{"location":"api/extensions/#omicron.extensions.np","text":"Extension function related to numpy","title":"np"},{"location":"api/extensions/#omicron.extensions.np.array_math_round","text":"\u5c06\u4e00\u7ef4\u6570\u7ec4arr\u7684\u6570\u636e\u8fdb\u884c\u56db\u820d\u4e94\u5165 numpy.around\u7684\u51fd\u6570\u5e76\u4e0d\u662f\u6570\u5b66\u4e0a\u7684\u56db\u820d\u4e94\u5165\uff0c\u5bf91.5\u548c2.5\u8fdb\u884cround\u7684\u7ed3\u679c\u90fd\u4f1a\u53d8\u62102\uff0c\u5728\u91d1\u878d\u9886\u57df\u8ba1\u7b97\u4e2d\uff0c\u6211\u4eec\u5fc5\u987b\u4f7f\u7528\u6570\u5b66\u610f\u4e49\u4e0a\u7684\u56db\u820d\u4e94\u5165\u3002 Parameters: Name Type Description Default arr ArrayLike \u8f93\u5165\u6570\u7ec4 required digits int required Returns: Type Description np.ndarray \u56db\u820d\u4e94\u5165\u540e\u7684\u4e00\u7ef4\u6570\u7ec4 Source code in omicron/extensions/np.py def array_math_round ( arr : Union [ float , ArrayLike ], digits : int ) -> np . 
ndarray : \"\"\"\u5c06\u4e00\u7ef4\u6570\u7ec4arr\u7684\u6570\u636e\u8fdb\u884c\u56db\u820d\u4e94\u5165 numpy.around\u7684\u51fd\u6570\u5e76\u4e0d\u662f\u6570\u5b66\u4e0a\u7684\u56db\u820d\u4e94\u5165\uff0c\u5bf91.5\u548c2.5\u8fdb\u884cround\u7684\u7ed3\u679c\u90fd\u4f1a\u53d8\u62102\uff0c\u5728\u91d1\u878d\u9886\u57df\u8ba1\u7b97\u4e2d\uff0c\u6211\u4eec\u5fc5\u987b\u4f7f\u7528\u6570\u5b66\u610f\u4e49\u4e0a\u7684\u56db\u820d\u4e94\u5165\u3002 Args: arr (ArrayLike): \u8f93\u5165\u6570\u7ec4 digits (int): Returns: np.ndarray: \u56db\u820d\u4e94\u5165\u540e\u7684\u4e00\u7ef4\u6570\u7ec4 \"\"\" # \u5982\u679c\u662f\u5355\u4e2a\u5143\u7d20\uff0c\u5219\u76f4\u63a5\u8fd4\u56de if isinstance ( arr , float ): return decimals . math_round ( arr , digits ) f = np . vectorize ( lambda x : decimals . math_round ( x , digits )) return f ( arr )","title":"array_math_round()"},{"location":"api/extensions/#omicron.extensions.np.array_price_equal","text":"\u5224\u65ad\u4e24\u4e2a\u4ef7\u683c\u6570\u7ec4\u662f\u5426\u76f8\u7b49 Parameters: Name Type Description Default price1 ArrayLike \u4ef7\u683c\u6570\u7ec4 required price2 ArrayLike \u4ef7\u683c\u6570\u7ec4 required Returns: Type Description np.ndarray \u5224\u65ad\u7ed3\u679c Source code in omicron/extensions/np.py def array_price_equal ( price1 : ArrayLike , price2 : ArrayLike ) -> np . ndarray : \"\"\"\u5224\u65ad\u4e24\u4e2a\u4ef7\u683c\u6570\u7ec4\u662f\u5426\u76f8\u7b49 Args: price1 (ArrayLike): \u4ef7\u683c\u6570\u7ec4 price2 (ArrayLike): \u4ef7\u683c\u6570\u7ec4 Returns: np.ndarray: \u5224\u65ad\u7ed3\u679c \"\"\" price1 = array_math_round ( price1 , 2 ) price2 = array_math_round ( price2 , 2 ) return abs ( price1 - price2 ) < 1e-2","title":"array_price_equal()"},{"location":"api/extensions/#omicron.extensions.np.bars_since","text":"Return the number of bars since condition sequence was last True , or if never, return default . 1 2 3 >>> condition = [True, True, False] >>> bars_since(condition) 1 Source code in omicron/extensions/np.py def bars_since ( condition : Sequence [ bool ], default = None ) -> int : \"\"\" Return the number of bars since `condition` sequence was last `True`, or if never, return `default`. >>> condition = [True, True, False] >>> bars_since(condition) 1 \"\"\" return next ( compress ( range ( len ( condition )), reversed ( condition )), default )","title":"bars_since()"},{"location":"api/extensions/#omicron.extensions.np.bin_cut","text":"\u5c06\u6570\u7ec4arr\u5207\u5206\u6210n\u4efd todo: use padding + reshape to boost performance Parameters: Name Type Description Default arr [type] [description] required n [type] [description] required Returns: Type Description [type] [description] Source code in omicron/extensions/np.py def bin_cut ( arr : list , n : int ): \"\"\"\u5c06\u6570\u7ec4arr\u5207\u5206\u6210n\u4efd todo: use padding + reshape to boost performance Args: arr ([type]): [description] n ([type]): [description] Returns: [type]: [description] \"\"\" result = [[] for i in range ( n )] for i , e in enumerate ( arr ): result [ i % n ] . 
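A sketch of the vectorized variants `array_math_round` and `array_price_equal` (assumed import path), illustrating the claim above that `numpy.around` sends both 1.5 and 2.5 to 2.

```python
import numpy as np

from omicron.extensions.np import array_math_round, array_price_equal  # assumed path

arr = np.array([1.5, 2.5])
print(np.around(arr))            # [2. 2.] -- banker's rounding
print(array_math_round(arr, 0))  # [2. 3.] -- mathematical rounding

print(array_price_equal(np.array([10.0, 3.004]), np.array([10.004, 3.0])))
# [ True  True] -- each pair agrees after rounding to two decimals
```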
append ( e ) return [ e for e in result if len ( e )]","title":"bin_cut()"},{"location":"api/extensions/#omicron.extensions.np.count_between","text":"\u8ba1\u7b97\u6570\u7ec4\u4e2d\uff0c start \u5143\u7d20\u4e0e end \u5143\u7d20\u4e4b\u95f4\u5171\u6709\u591a\u5c11\u4e2a\u5143\u7d20 \u8981\u6c42arr\u5fc5\u987b\u662f\u5df2\u6392\u5e8f\u3002\u8ba1\u7b97\u7ed3\u679c\u4f1a\u5305\u542b\u533a\u95f4\u8fb9\u754c\u70b9\u3002 Examples: >>> arr = [ 20050104 , 20050105 , 20050106 , 20050107 , 20050110 , 20050111 ] >>> count_between ( arr , 20050104 , 20050111 ) 6 >>> count_between ( arr , 20050104 , 20050109 ) 4 Source code in omicron/extensions/np.py def count_between ( arr , start , end ): \"\"\"\u8ba1\u7b97\u6570\u7ec4\u4e2d\uff0c`start`\u5143\u7d20\u4e0e`end`\u5143\u7d20\u4e4b\u95f4\u5171\u6709\u591a\u5c11\u4e2a\u5143\u7d20 \u8981\u6c42arr\u5fc5\u987b\u662f\u5df2\u6392\u5e8f\u3002\u8ba1\u7b97\u7ed3\u679c\u4f1a\u5305\u542b\u533a\u95f4\u8fb9\u754c\u70b9\u3002 Examples: >>> arr = [20050104, 20050105, 20050106, 20050107, 20050110, 20050111] >>> count_between(arr, 20050104, 20050111) 6 >>> count_between(arr, 20050104, 20050109) 4 \"\"\" pos_start = np . searchsorted ( arr , start , side = \"right\" ) pos_end = np . searchsorted ( arr , end , side = \"right\" ) counter = pos_end - pos_start + 1 if start < arr [ 0 ]: counter -= 1 if end > arr [ - 1 ]: counter -= 1 return counter","title":"count_between()"},{"location":"api/extensions/#omicron.extensions.np.dataframe_to_structured_array","text":"convert dataframe (with all columns, and index possibly) to numpy structured arrays len(dtypes) should be either equal to len(df.columns) or len(df.columns) + 1 . In the later case, it implies to include df.index into converted array. Parameters: Name Type Description Default df DataFrame the one needs to be converted required dtypes List[Tuple] Defaults to None. If it's None , then dtypes of df is used, in such case, the index of df will not be converted. None Returns: Type Description ArrayLike [description] Source code in omicron/extensions/np.py def dataframe_to_structured_array ( df : DataFrame , dtypes : List [ Tuple ] = None ) -> ArrayLike : \"\"\"convert dataframe (with all columns, and index possibly) to numpy structured arrays `len(dtypes)` should be either equal to `len(df.columns)` or `len(df.columns) + 1`. In the later case, it implies to include `df.index` into converted array. Args: df: the one needs to be converted dtypes: Defaults to None. If it's `None`, then dtypes of `df` is used, in such case, the `index` of `df` will not be converted. Returns: ArrayLike: [description] \"\"\" v = df if dtypes is not None : dtypes_in_dict = { key : value for key , value in dtypes } col_len = len ( df . columns ) if len ( dtypes ) == col_len + 1 : v = df . reset_index () rename_index_to = set ( dtypes_in_dict . keys ()) . difference ( set ( df . columns )) v . rename ( columns = { \"index\" : list ( rename_index_to )[ 0 ]}, inplace = True ) elif col_len != len ( dtypes ): raise ValueError ( f \"length of dtypes should be either { col_len } or { col_len + 1 } , is { len ( dtypes ) } \" ) # re-arrange order of dtypes, in order to align with df.columns dtypes = [] for name in v . columns : dtypes . append (( name , dtypes_in_dict [ name ])) else : dtypes = df . dtypes return np . array ( np . rec . fromrecords ( v . 
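A sketch of `bin_cut` (assumed import path): a round-robin split into `n` buckets, with empty buckets dropped.

```python
from omicron.extensions.np import bin_cut  # assumed path

print(bin_cut(list(range(7)), 3))  # [[0, 3, 6], [1, 4], [2, 5]]
print(bin_cut([1, 2], 4))          # [[1], [2]] -- empty buckets are dropped
```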
values ), dtype = dtypes )","title":"dataframe_to_structured_array()"},{"location":"api/extensions/#omicron.extensions.np.dict_to_numpy_array","text":"convert dictionary to numpy array Examples: d = {\"aaron\": 5, \"jack\": 6} dtype = [(\"name\", \"S8\"), (\"score\", \" np . array : \"\"\"convert dictionary to numpy array Examples: >>> d = {\"aaron\": 5, \"jack\": 6} >>> dtype = [(\"name\", \"S8\"), (\"score\", \">> dict_to_numpy_array(d, dtype) array([(b'aaron', 5), (b'jack', 6)], dtype=[('name', 'S8'), ('score', '>> arr = np . arange ( 6 , dtype = np . float32 ) >>> arr [ 3 : 5 ] = np . NaN >>> fill_nan ( arr ) ... array ([ 0. , 1. , 2. , 2. , 2. , 5. ], dtype = float32 ) >>> arr = np . arange ( 6 , dtype = np . float32 ) >>> arr [ 0 : 2 ] = np . nan >>> fill_nan ( arr ) ... array ([ 2. , 2. , 2. , 3. , 4. , 5. ], dtype = float32 ) Parameters: Name Type Description Default ts np.array [description] required Source code in omicron/extensions/np.py def fill_nan ( ts : np . ndarray ): \"\"\"\u5c06ts\u4e2d\u7684NaN\u66ff\u6362\u4e3a\u5176\u524d\u503c \u5982\u679cts\u8d77\u5934\u7684\u5143\u7d20\u4e3aNaN\uff0c\u5219\u7528\u7b2c\u4e00\u4e2a\u975eNaN\u5143\u7d20\u66ff\u6362\u3002 \u5982\u679c\u6240\u6709\u5143\u7d20\u90fd\u4e3aNaN\uff0c\u5219\u65e0\u6cd5\u66ff\u6362\u3002 Example: >>> arr = np.arange(6, dtype=np.float32) >>> arr[3:5] = np.NaN >>> fill_nan(arr) ... # doctest: +NORMALIZE_WHITESPACE array([0., 1., 2., 2., 2., 5.], dtype=float32) >>> arr = np.arange(6, dtype=np.float32) >>> arr[0:2] = np.nan >>> fill_nan(arr) ... # doctest: +NORMALIZE_WHITESPACE array([2., 2., 2., 3., 4., 5.], dtype=float32) Args: ts (np.array): [description] \"\"\" if np . all ( np . isnan ( ts )): raise ValueError ( \"all of ts are NaN\" ) if ts [ 0 ] is None or math . isnan ( ts [ 0 ]): idx = np . argwhere ( ~ np . isnan ( ts ))[ 0 ] ts [ 0 ] = ts [ idx ] mask = np . isnan ( ts ) idx = np . where ( ~ mask , np . arange ( mask . size ), 0 ) np . maximum . accumulate ( idx , out = idx ) return ts [ idx ]","title":"fill_nan()"},{"location":"api/extensions/#omicron.extensions.np.find_runs","text":"Find runs of consecutive items in an array. Parameters: Name Type Description Default x ArrayLike the sequence to find runs in required Returns: Type Description Tuple[np.ndarray, np.ndarray, np.ndarray] A tuple of unique values, start indices, and length of runs Source code in omicron/extensions/np.py def find_runs ( x : ArrayLike ) -> Tuple [ np . ndarray , np . ndarray , np . ndarray ]: \"\"\"Find runs of consecutive items in an array. Args: x: the sequence to find runs in Returns: A tuple of unique values, start indices, and length of runs \"\"\" # ensure array x = np . asanyarray ( x ) if x . ndim != 1 : raise ValueError ( \"only 1D array supported\" ) n = x . shape [ 0 ] # handle empty array if n == 0 : return np . array ([]), np . array ([]), np . array ([]) else : # find run starts loc_run_start = np . empty ( n , dtype = bool ) loc_run_start [ 0 ] = True np . not_equal ( x [: - 1 ], x [ 1 :], out = loc_run_start [ 1 :]) run_starts = np . nonzero ( loc_run_start )[ 0 ] # find run values run_values = x [ loc_run_start ] # find run lengths run_lengths = np . diff ( np . 
append ( run_starts , n )) return run_values , run_starts , run_lengths","title":"find_runs()"},{"location":"api/extensions/#omicron.extensions.np.floor","text":"\u5728\u6570\u636earr\u4e2d\uff0c\u627e\u5230\u5c0f\u4e8e\u7b49\u4e8eitem\u7684\u90a3\u4e00\u4e2a\u503c\u3002\u5982\u679citem\u5c0f\u4e8e\u6240\u6709arr\u5143\u7d20\u7684\u503c\uff0c\u8fd4\u56dearr[0];\u5982\u679citem \u5927\u4e8e\u6240\u6709arr\u5143\u7d20\u7684\u503c\uff0c\u8fd4\u56dearr[-1] \u4e0e minute_frames_floor \u4e0d\u540c\u7684\u662f\uff0c\u672c\u51fd\u6570\u4e0d\u505a\u56de\u7ed5\u4e0e\u8fdb\u4f4d. Examples: >>> a = [ 3 , 6 , 9 ] >>> floor ( a , - 1 ) 3 >>> floor ( a , 9 ) 9 >>> floor ( a , 10 ) 9 >>> floor ( a , 4 ) 3 >>> floor ( a , 10 ) 9 Parameters: Name Type Description Default arr required item required Source code in omicron/extensions/np.py def floor ( arr , item ): \"\"\" \u5728\u6570\u636earr\u4e2d\uff0c\u627e\u5230\u5c0f\u4e8e\u7b49\u4e8eitem\u7684\u90a3\u4e00\u4e2a\u503c\u3002\u5982\u679citem\u5c0f\u4e8e\u6240\u6709arr\u5143\u7d20\u7684\u503c\uff0c\u8fd4\u56dearr[0];\u5982\u679citem \u5927\u4e8e\u6240\u6709arr\u5143\u7d20\u7684\u503c\uff0c\u8fd4\u56dearr[-1] \u4e0e`minute_frames_floor`\u4e0d\u540c\u7684\u662f\uff0c\u672c\u51fd\u6570\u4e0d\u505a\u56de\u7ed5\u4e0e\u8fdb\u4f4d. Examples: >>> a = [3, 6, 9] >>> floor(a, -1) 3 >>> floor(a, 9) 9 >>> floor(a, 10) 9 >>> floor(a, 4) 3 >>> floor(a,10) 9 Args: arr: item: Returns: \"\"\" if item < arr [ 0 ]: return arr [ 0 ] index = np . searchsorted ( arr , item , side = \"right\" ) return arr [ index - 1 ]","title":"floor()"},{"location":"api/extensions/#omicron.extensions.np.join_by_left","text":"\u5de6\u8fde\u63a5 r1 , r2 by key \u5982\u679c r1 \u4e2d\u5b58\u5728 r2 \u4e2d\u6ca1\u6709\u7684\u884c\uff0c\u5219\u8be5\u884c\u5bf9\u5e94\u7684 r2 \u4e2d\u7684\u90a3\u4e9b\u5b57\u6bb5\u5c06\u88abmask\uff0c\u6216\u8005\u586b\u5145\u968f\u673a\u6570\u3002 same as numpy.lib.recfunctions.join_by(key, r1, r2, jointype='leftouter'), but allows r1 have duplicate keys Reference: stackoverflow Examples: >>> # to join the following >>> # [[ 1, 2], >>> # [ 1, 3], x [[1, 5], >>> # [ 2, 3]] [4, 7]] >>> # only first two rows in left will be joined >>> r1 = np . array ([( 1 , 2 ), ( 1 , 3 ), ( 2 , 3 )], dtype = [( 'seq' , 'i4' ), ( 'score' , 'i4' )]) >>> r2 = np . array ([( 1 , 5 ), ( 4 , 7 )], dtype = [( 'seq' , 'i4' ), ( 'age' , 'i4' )]) >>> joined = join_by_left ( 'seq' , r1 , r2 ) >>> print ( joined ) [( 1 , 2 , 5 ) ( 1 , 3 , 5 ) ( 2 , 3 , -- )] >>> print ( joined . dtype ) ( numpy . record , [( 'seq' , '>> joined [ 2 ][ 2 ] masked >>> joined . 
tolist ()[ 2 ][ 2 ] == None True Parameters: Name Type Description Default key join\u5173\u952e\u5b57 required r1 \u6570\u636e\u96c61 required r2 \u6570\u636e\u96c62 required Returns: Type Description a numpy array Source code in omicron/extensions/np.py def join_by_left ( key , r1 , r2 , mask = True ): \"\"\"\u5de6\u8fde\u63a5 `r1`, `r2` by `key` \u5982\u679c`r1`\u4e2d\u5b58\u5728`r2`\u4e2d\u6ca1\u6709\u7684\u884c\uff0c\u5219\u8be5\u884c\u5bf9\u5e94\u7684`r2`\u4e2d\u7684\u90a3\u4e9b\u5b57\u6bb5\u5c06\u88abmask\uff0c\u6216\u8005\u586b\u5145\u968f\u673a\u6570\u3002 same as numpy.lib.recfunctions.join_by(key, r1, r2, jointype='leftouter'), but allows r1 have duplicate keys [Reference: stackoverflow](https://stackoverflow.com/a/53261882/13395693) Examples: >>> # to join the following >>> # [[ 1, 2], >>> # [ 1, 3], x [[1, 5], >>> # [ 2, 3]] [4, 7]] >>> # only first two rows in left will be joined >>> r1 = np.array([(1, 2), (1,3), (2,3)], dtype=[('seq', 'i4'), ('score', 'i4')]) >>> r2 = np.array([(1, 5), (4,7)], dtype=[('seq', 'i4'), ('age', 'i4')]) >>> joined = join_by_left('seq', r1, r2) >>> print(joined) [(1, 2, 5) (1, 3, 5) (2, 3, --)] >>> print(joined.dtype) (numpy.record, [('seq', '>> joined[2][2] masked >>> joined.tolist()[2][2] == None True Args: key : join\u5173\u952e\u5b57 r1 : \u6570\u636e\u96c61 r2 : \u6570\u636e\u96c62 Returns: a numpy array \"\"\" # figure out the dtype of the result array descr1 = r1 . dtype . descr descr2 = [ d for d in r2 . dtype . descr if d [ 0 ] not in r1 . dtype . names ] descrm = descr1 + descr2 # figure out the fields we'll need from each array f1 = [ d [ 0 ] for d in descr1 ] f2 = [ d [ 0 ] for d in descr2 ] # cache the number of columns in f1 ncol1 = len ( f1 ) # get a dict of the rows of r2 grouped by key rows2 = {} for row2 in r2 : rows2 . setdefault ( row2 [ key ], []) . append ( row2 ) # figure out how many rows will be in the result nrowm = 0 for k1 in r1 [ key ]: if k1 in rows2 : nrowm += len ( rows2 [ k1 ]) else : nrowm += 1 # allocate the return array # ret = np.full((nrowm, ), fill, dtype=descrm) _ret = np . recarray ( nrowm , dtype = descrm ) if mask : ret = np . ma . array ( _ret , mask = True ) else : ret = _ret # merge the data into the return array i = 0 for row1 in r1 : if row1 [ key ] in rows2 : for row2 in rows2 [ row1 [ key ]]: ret [ i ] = tuple ( row1 [ f1 ]) + tuple ( row2 [ f2 ]) i += 1 else : for j in range ( ncol1 ): ret [ i ][ j ] = row1 [ j ] i += 1 return ret","title":"join_by_left()"},{"location":"api/extensions/#omicron.extensions.np.numpy_append_fields","text":"\u7ed9\u73b0\u6709\u7684\u6570\u7ec4 base \u589e\u52a0\u65b0\u7684\u5b57\u6bb5 \u5b9e\u73b0\u4e86 numpy.lib.recfunctions.rec_append_fields \u7684\u529f\u80fd\u3002\u63d0\u4f9b\u8fd9\u4e2a\u529f\u80fd\uff0c\u662f\u56e0\u4e3a rec_append_fields \u4e0d\u80fd\u5904\u7406 data \u5143\u7d20\u7684\u7c7b\u578b\u4e3aObject\u7684\u60c5\u51b5\u3002 \u65b0\u589e\u7684\u6570\u636e\u5217\u5c06\u987a\u5e8f\u6392\u5217\u5728\u5176\u5b83\u5217\u7684\u53f3\u8fb9\u3002 Examples: >>> # \u65b0\u589e\u5355\u4e2a\u5b57\u6bb5 >>> import numpy >>> old = np . array ([ i for i in range ( 3 )], dtype = [( 'col1' , '>> new_list = [ 2 * i for i in range ( 3 )] >>> res = numpy_append_fields ( old , 'new_col' , new_list , [( 'new_col' , '>> print ( res ) ... [( 0. , 0. ) ( 1. , 2. ) ( 2. , 4. )] >>> # \u65b0\u589e\u591a\u4e2a\u5b57\u6bb5 >>> data = [ res [ 'col1' ] . tolist (), res [ 'new_col' ] . tolist ()] >>> print ( numpy_append_fields ( old , ( 'col3' , 'col4' ), data , [( 'col3' , ' np . 
ndarray : \"\"\"\u7ed9\u73b0\u6709\u7684\u6570\u7ec4`base`\u589e\u52a0\u65b0\u7684\u5b57\u6bb5 \u5b9e\u73b0\u4e86`numpy.lib.recfunctions.rec_append_fields`\u7684\u529f\u80fd\u3002\u63d0\u4f9b\u8fd9\u4e2a\u529f\u80fd\uff0c\u662f\u56e0\u4e3a`rec_append_fields`\u4e0d\u80fd\u5904\u7406`data`\u5143\u7d20\u7684\u7c7b\u578b\u4e3aObject\u7684\u60c5\u51b5\u3002 \u65b0\u589e\u7684\u6570\u636e\u5217\u5c06\u987a\u5e8f\u6392\u5217\u5728\u5176\u5b83\u5217\u7684\u53f3\u8fb9\u3002 Example: >>> # \u65b0\u589e\u5355\u4e2a\u5b57\u6bb5 >>> import numpy >>> old = np.array([i for i in range(3)], dtype=[('col1', '>> new_list = [2 * i for i in range(3)] >>> res = numpy_append_fields(old, 'new_col', new_list, [('new_col', '>> print(res) ... # doctest: +NORMALIZE_WHITESPACE [(0., 0.) (1., 2.) (2., 4.)] >>> # \u65b0\u589e\u591a\u4e2a\u5b57\u6bb5 >>> data = [res['col1'].tolist(), res['new_col'].tolist()] >>> print(numpy_append_fields(old, ('col3', 'col4'), data, [('col3', ' np . ndarray : \"\"\"\u4ece`ts`\u4e2d\u53bb\u9664NaN Args: ts (np.array): [description] Returns: np.array: [description] \"\"\" return ts [ ~ np . isnan ( ts . astype ( float ))]","title":"remove_nan()"},{"location":"api/extensions/#omicron.extensions.np.replace_zero","text":"\u5c06ts\u4e2d\u76840\u66ff\u6362\u4e3a\u524d\u503c, \u5904\u7406volume\u6570\u636e\u65f6\u5e38\u7528\u7528\u5230 \u5982\u679c\u63d0\u4f9b\u4e86replacement, \u5219\u66ff\u6362\u4e3areplacement Source code in omicron/extensions/np.py def replace_zero ( ts : np . ndarray , replacement = None ) -> np . ndarray : \"\"\"\u5c06ts\u4e2d\u76840\u66ff\u6362\u4e3a\u524d\u503c, \u5904\u7406volume\u6570\u636e\u65f6\u5e38\u7528\u7528\u5230 \u5982\u679c\u63d0\u4f9b\u4e86replacement, \u5219\u66ff\u6362\u4e3areplacement \"\"\" if replacement is not None : return np . where ( ts == 0 , replacement , ts ) if np . all ( ts == 0 ): raise ValueError ( \"all of ts are 0\" ) if ts [ 0 ] == 0 : idx = np . argwhere ( ts != 0 )[ 0 ] ts [ 0 ] = ts [ idx ] mask = ts == 0 idx = np . where ( ~ mask , np . arange ( mask . size ), 0 ) np . maximum . accumulate ( idx , out = idx ) return ts [ idx ]","title":"replace_zero()"},{"location":"api/extensions/#omicron.extensions.np.rolling","text":"\u5bf9\u5e8f\u5217 x \u8fdb\u884c\u7a97\u53e3\u6ed1\u52a8\u8ba1\u7b97\u3002 \u5982\u679c func \u8981\u5b9e\u73b0\u7684\u529f\u80fd\u662fargmax, argmin, max, mean, median, min, rank, std, sum, var\u7b49\uff0cmove_argmax\uff0c\u8bf7\u4f7f\u7528bottleneck\u4e2d\u7684move_argmin, move_max, move_mean, move_median, move_min move_rank, move_std, move_sum, move_var\u3002\u8fd9\u4e9b\u51fd\u6570\u7684\u6027\u80fd\u66f4\u597d\u3002 Parameters: Name Type Description Default x [type] [description] required win [type] [description] required func [type] [description] required Returns: Type Description [type] [description] Source code in omicron/extensions/np.py def rolling ( x , win , func ): \"\"\"\u5bf9\u5e8f\u5217`x`\u8fdb\u884c\u7a97\u53e3\u6ed1\u52a8\u8ba1\u7b97\u3002 \u5982\u679c`func`\u8981\u5b9e\u73b0\u7684\u529f\u80fd\u662fargmax, argmin, max, mean, median, min, rank, std, sum, var\u7b49\uff0cmove_argmax\uff0c\u8bf7\u4f7f\u7528bottleneck\u4e2d\u7684move_argmin, move_max, move_mean, move_median, move_min move_rank, move_std, move_sum, move_var\u3002\u8fd9\u4e9b\u51fd\u6570\u7684\u6027\u80fd\u66f4\u597d\u3002 Args: x ([type]): [description] win ([type]): [description] func ([type]): [description] Returns: [type]: [description] \"\"\" results = [] for subarray in sliding_window_view ( x , window_shape = win ): results . 
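A sketch of `numpy_append_fields` (assumed import path), following the docstring example; the `'<f8'` dtype strings are spelled out here as an assumption because the `<`-prefixed annotations are truncated in the rendered docstring.

```python
import numpy as np

from omicron.extensions.np import numpy_append_fields  # assumed path

old = np.array([(0.0,), (1.0,), (2.0,)], dtype=[("col1", "<f8")])
new_col = [0.0, 2.0, 4.0]
res = numpy_append_fields(old, "new_col", new_col, [("new_col", "<f8")])
print(res)  # [(0., 0.) (1., 2.) (2., 4.)]
```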
append ( func ( subarray )) return np . array ( results )","title":"rolling()"},{"location":"api/extensions/#omicron.extensions.np.shift","text":"\u5728numpy\u6570\u7ec4arr\u4e2d\uff0c\u627e\u5230start(\u6216\u8005\u6700\u63a5\u8fd1\u7684\u4e00\u4e2a\uff09\uff0c\u53d6offset\u5bf9\u5e94\u7684\u5143\u7d20\u3002 \u8981\u6c42 arr \u5df2\u6392\u5e8f\u3002 offset \u4e3a\u6b63\uff0c\u8868\u660e\u5411\u540e\u79fb\u4f4d\uff1b offset \u4e3a\u8d1f\uff0c\u8868\u660e\u5411\u524d\u79fb\u4f4d Examples: >>> arr = [ 20050104 , 20050105 , 20050106 , 20050107 , 20050110 , 20050111 ] >>> shift ( arr , 20050104 , 1 ) 20050105 >>> shift ( arr , 20050105 , - 1 ) 20050104 >>> # \u8d77\u59cb\u70b9\u5df2\u53f3\u8d8a\u754c\uff0c\u4e14\u5411\u53f3shift\uff0c\u8fd4\u56de\u8d77\u59cb\u70b9 >>> shift ( arr , 20050120 , 1 ) 20050120 Parameters: Name Type Description Default arr \u5df2\u6392\u5e8f\u7684\u6570\u7ec4 required start numpy\u53ef\u63a5\u53d7\u7684\u6570\u636e\u7c7b\u578b required offset int [description] required Returns: Type Description \u79fb\u4f4d\u540e\u5f97\u5230\u7684\u5143\u7d20\u503c Source code in omicron/extensions/np.py def shift ( arr , start , offset ): \"\"\"\u5728numpy\u6570\u7ec4arr\u4e2d\uff0c\u627e\u5230start(\u6216\u8005\u6700\u63a5\u8fd1\u7684\u4e00\u4e2a\uff09\uff0c\u53d6offset\u5bf9\u5e94\u7684\u5143\u7d20\u3002 \u8981\u6c42`arr`\u5df2\u6392\u5e8f\u3002`offset`\u4e3a\u6b63\uff0c\u8868\u660e\u5411\u540e\u79fb\u4f4d\uff1b`offset`\u4e3a\u8d1f\uff0c\u8868\u660e\u5411\u524d\u79fb\u4f4d Examples: >>> arr = [20050104, 20050105, 20050106, 20050107, 20050110, 20050111] >>> shift(arr, 20050104, 1) 20050105 >>> shift(arr, 20050105, -1) 20050104 >>> # \u8d77\u59cb\u70b9\u5df2\u53f3\u8d8a\u754c\uff0c\u4e14\u5411\u53f3shift\uff0c\u8fd4\u56de\u8d77\u59cb\u70b9 >>> shift(arr, 20050120, 1) 20050120 Args: arr : \u5df2\u6392\u5e8f\u7684\u6570\u7ec4 start : numpy\u53ef\u63a5\u53d7\u7684\u6570\u636e\u7c7b\u578b offset (int): [description] Returns: \u79fb\u4f4d\u540e\u5f97\u5230\u7684\u5143\u7d20\u503c \"\"\" pos = np . searchsorted ( arr , start , side = \"right\" ) if pos + offset - 1 >= len ( arr ): return start else : return arr [ pos + offset - 1 ]","title":"shift()"},{"location":"api/extensions/#omicron.extensions.np.smallest_n_argpos","text":"get smallest n (min->max) elements and return argpos which its value ordered in ascent Examples: >>> smallest_n_argpos ([ np . nan , 4 , 3 , 9 , 8 , 5 , 2 , 1 , 0 , 6 , 7 ], 2 ) array ([ 8 , 7 ]) Parameters: Name Type Description Default ts np.array \u8f93\u5165\u7684\u6570\u7ec4 required n int \u53d6\u6700\u5c0f\u7684n\u4e2a\u5143\u7d20 required Returns: Type Description np.array [description] Source code in omicron/extensions/np.py def smallest_n_argpos ( ts : np . array , n : int ) -> np . array : \"\"\"get smallest n (min->max) elements and return argpos which its value ordered in ascent Example: >>> smallest_n_argpos([np.nan, 4, 3, 9, 8, 5, 2, 1, 0, 6, 7], 2) array([8, 7]) Args: ts (np.array): \u8f93\u5165\u7684\u6570\u7ec4 n (int): \u53d6\u6700\u5c0f\u7684n\u4e2a\u5143\u7d20 Returns: np.array: [description] \"\"\" return np . 
argsort ( ts )[: n ]","title":"smallest_n_argpos()"},{"location":"api/extensions/#omicron.extensions.np.to_pydatetime","text":"\u5c06numpy.datetime64\u5bf9\u8c61\u8f6c\u6362\u6210\u4e3apython\u7684datetime\u5bf9\u8c61 numpy.ndarray.item()\u65b9\u6cd5\u53ef\u7528\u4ee5\u5c06\u4efb\u4f55numpy\u5bf9\u8c61\u8f6c\u6362\u6210python\u5bf9\u8c61\uff0c\u63a8\u8350\u5728\u4efb\u4f55\u9002\u7528\u7684\u5730\u65b9\u4f7f\u7528.item()\u65b9\u6cd5\uff0c\u800c\u4e0d\u662f\u672c\u65b9\u6cd5\u3002\u793a\u4f8b: 1 2 3 4 arr = np.array(['2022-09-08', '2022-09-09'], dtype='datetime64[s]') arr.item(0) # output is datetime.datetime(2022, 9, 8, 0, 0) arr[1].item() # output is datetime.datetime(2022, 9, 9, 0, 0) Parameters: Name Type Description Default tm the input numpy datetime object required Returns: Type Description datetime.datetime python datetime object .. deprecated:: 2.0.0 use tm.item() instead Source code in omicron/extensions/np.py @deprecated ( \"2.0.0\" , details = \"use `tm.item()` instead\" ) def to_pydatetime ( tm : np . datetime64 ) -> datetime . datetime : \"\"\"\u5c06numpy.datetime64\u5bf9\u8c61\u8f6c\u6362\u6210\u4e3apython\u7684datetime\u5bf9\u8c61 numpy.ndarray.item()\u65b9\u6cd5\u53ef\u7528\u4ee5\u5c06\u4efb\u4f55numpy\u5bf9\u8c61\u8f6c\u6362\u6210python\u5bf9\u8c61\uff0c\u63a8\u8350\u5728\u4efb\u4f55\u9002\u7528\u7684\u5730\u65b9\u4f7f\u7528.item()\u65b9\u6cd5\uff0c\u800c\u4e0d\u662f\u672c\u65b9\u6cd5\u3002\u793a\u4f8b: ``` arr = np.array(['2022-09-08', '2022-09-09'], dtype='datetime64[s]') arr.item(0) # output is datetime.datetime(2022, 9, 8, 0, 0) arr[1].item() # output is datetime.datetime(2022, 9, 9, 0, 0) ``` Args: tm : the input numpy datetime object Returns: python datetime object \"\"\" unix_epoch = np . datetime64 ( 0 , \"s\" ) one_second = np . timedelta64 ( 1 , \"s\" ) seconds_since_epoch = ( tm - unix_epoch ) / one_second return datetime . datetime . utcfromtimestamp ( seconds_since_epoch )","title":"to_pydatetime()"},{"location":"api/extensions/#omicron.extensions.np.top_n_argpos","text":"get top n (max->min) elements and return argpos which its value ordered in descent Examples: >>> top_n_argpos ([ np . nan , 4 , 3 , 9 , 8 , 5 , 2 , 1 , 0 , 6 , 7 ], 2 ) array ([ 3 , 4 ]) Parameters: Name Type Description Default ts np.array [description] required n int [description] required Returns: Type Description np.array [description] Source code in omicron/extensions/np.py def top_n_argpos ( ts : np . array , n : int ) -> np . array : \"\"\"get top n (max->min) elements and return argpos which its value ordered in descent Example: >>> top_n_argpos([np.nan, 4, 3, 9, 8, 5, 2, 1, 0, 6, 7], 2) array([3, 4]) Args: ts (np.array): [description] n (int): [description] Returns: np.array: [description] \"\"\" ts_ = np . copy ( ts ) ts_ [ np . isnan ( ts_ )] = - np . inf return np . 
argsort ( ts_ )[ - n :][:: - 1 ]","title":"top_n_argpos()"},{"location":"api/metrics/","text":"\u4ee5\u4e0b\u529f\u80fd\u8bf7\u4f7f\u7528empyrical\u5305\u4e2d\u76f8\u5173\u7684\u51fd\u6570\u3002 \u00b6 usage : 1 from empyrical import aggregate_returns aggregate_returns \u00b6 external link alpha \u00b6 external link alpha_aligned \u00b6 external link alpha_beta \u00b6 external link alpha_beta_aligned \u00b6 external link annual_return \u00b6 external link annual_volatility \u00b6 external link beta \u00b6 external link beta_aligned \u00b6 external link beta_fragility_heuristic \u00b6 external link beta_fragility_heuristic_aligned \u00b6 external link cagr \u00b6 external link calmar_ratio \u00b6 external link capture \u00b6 external link compute_exposures \u00b6 external link conditional_value_at_risk \u00b6 external link cum_returns \u00b6 external link cum_returns_final \u00b6 external link down_alpha_beta \u00b6 external link down_capture \u00b6 external link downside_risk \u00b6 external link excess_sharpe \u00b6 external link gpd_risk_estimates \u00b6 external link gpd_risk_estimates_aligned \u00b6 external link max_drawdown \u00b6 external link omega_ratio \u00b6 external link perf_attrib \u00b6 external link periods \u00b6 external link roll_alpha \u00b6 external link roll_alpha_aligned \u00b6 external link roll_alpha_beta \u00b6 external link roll_alpha_beta_aligned \u00b6 external link roll_annual_volatility \u00b6 external link roll_beta \u00b6 external link roll_beta_aligned \u00b6 external link roll_down_capture \u00b6 external link roll_max_drawdown \u00b6 external link roll_sharpe_ratio \u00b6 external link roll_sortino_ratio \u00b6 external link roll_up_capture \u00b6 external link roll_up_down_capture \u00b6 external link sharpe_ratio \u00b6 external link simple_returns \u00b6 external link sortino_ratio \u00b6 external link stability_of_timeseries \u00b6 external link stats \u00b6 external link tail_ratio \u00b6 external link up_alpha_beta \u00b6 external link up_capture \u00b6 external link up_down_capture \u00b6 external link utils \u00b6 external link value_at_risk \u00b6 external link","title":"metrics"},{"location":"api/metrics/#\u4ee5\u4e0b\u529f\u80fd\u8bf7\u4f7f\u7528empyrical\u5305\u4e2d\u76f8\u5173\u7684\u51fd\u6570","text":"usage : 1 from empyrical import aggregate_returns","title":"\u4ee5\u4e0b\u529f\u80fd\u8bf7\u4f7f\u7528empyrical\u5305\u4e2d\u76f8\u5173\u7684\u51fd\u6570\u3002"},{"location":"api/metrics/#aggregate_returns","text":"external link","title":"aggregate_returns"},{"location":"api/metrics/#alpha","text":"external link","title":"alpha"},{"location":"api/metrics/#alpha_aligned","text":"external link","title":"alpha_aligned"},{"location":"api/metrics/#alpha_beta","text":"external link","title":"alpha_beta"},{"location":"api/metrics/#alpha_beta_aligned","text":"external link","title":"alpha_beta_aligned"},{"location":"api/metrics/#annual_return","text":"external link","title":"annual_return"},{"location":"api/metrics/#annual_volatility","text":"external link","title":"annual_volatility"},{"location":"api/metrics/#beta","text":"external link","title":"beta"},{"location":"api/metrics/#beta_aligned","text":"external link","title":"beta_aligned"},{"location":"api/metrics/#beta_fragility_heuristic","text":"external link","title":"beta_fragility_heuristic"},{"location":"api/metrics/#beta_fragility_heuristic_aligned","text":"external link","title":"beta_fragility_heuristic_aligned"},{"location":"api/metrics/#cagr","text":"external 
link","title":"cagr"},{"location":"api/metrics/#calmar_ratio","text":"external link","title":"calmar_ratio"},{"location":"api/metrics/#capture","text":"external link","title":"capture"},{"location":"api/metrics/#compute_exposures","text":"external link","title":"compute_exposures"},{"location":"api/metrics/#conditional_value_at_risk","text":"external link","title":"conditional_value_at_risk"},{"location":"api/metrics/#cum_returns","text":"external link","title":"cum_returns"},{"location":"api/metrics/#cum_returns_final","text":"external link","title":"cum_returns_final"},{"location":"api/metrics/#down_alpha_beta","text":"external link","title":"down_alpha_beta"},{"location":"api/metrics/#down_capture","text":"external link","title":"down_capture"},{"location":"api/metrics/#downside_risk","text":"external link","title":"downside_risk"},{"location":"api/metrics/#excess_sharpe","text":"external link","title":"excess_sharpe"},{"location":"api/metrics/#gpd_risk_estimates","text":"external link","title":"gpd_risk_estimates"},{"location":"api/metrics/#gpd_risk_estimates_aligned","text":"external link","title":"gpd_risk_estimates_aligned"},{"location":"api/metrics/#max_drawdown","text":"external link","title":"max_drawdown"},{"location":"api/metrics/#omega_ratio","text":"external link","title":"omega_ratio"},{"location":"api/metrics/#perf_attrib","text":"external link","title":"perf_attrib"},{"location":"api/metrics/#periods","text":"external link","title":"periods"},{"location":"api/metrics/#roll_alpha","text":"external link","title":"roll_alpha"},{"location":"api/metrics/#roll_alpha_aligned","text":"external link","title":"roll_alpha_aligned"},{"location":"api/metrics/#roll_alpha_beta","text":"external link","title":"roll_alpha_beta"},{"location":"api/metrics/#roll_alpha_beta_aligned","text":"external link","title":"roll_alpha_beta_aligned"},{"location":"api/metrics/#roll_annual_volatility","text":"external link","title":"roll_annual_volatility"},{"location":"api/metrics/#roll_beta","text":"external link","title":"roll_beta"},{"location":"api/metrics/#roll_beta_aligned","text":"external link","title":"roll_beta_aligned"},{"location":"api/metrics/#roll_down_capture","text":"external link","title":"roll_down_capture"},{"location":"api/metrics/#roll_max_drawdown","text":"external link","title":"roll_max_drawdown"},{"location":"api/metrics/#roll_sharpe_ratio","text":"external link","title":"roll_sharpe_ratio"},{"location":"api/metrics/#roll_sortino_ratio","text":"external link","title":"roll_sortino_ratio"},{"location":"api/metrics/#roll_up_capture","text":"external link","title":"roll_up_capture"},{"location":"api/metrics/#roll_up_down_capture","text":"external link","title":"roll_up_down_capture"},{"location":"api/metrics/#sharpe_ratio","text":"external link","title":"sharpe_ratio"},{"location":"api/metrics/#simple_returns","text":"external link","title":"simple_returns"},{"location":"api/metrics/#sortino_ratio","text":"external link","title":"sortino_ratio"},{"location":"api/metrics/#stability_of_timeseries","text":"external link","title":"stability_of_timeseries"},{"location":"api/metrics/#stats","text":"external link","title":"stats"},{"location":"api/metrics/#tail_ratio","text":"external link","title":"tail_ratio"},{"location":"api/metrics/#up_alpha_beta","text":"external link","title":"up_alpha_beta"},{"location":"api/metrics/#up_capture","text":"external link","title":"up_capture"},{"location":"api/metrics/#up_down_capture","text":"external 
link","title":"up_down_capture"},{"location":"api/metrics/#utils","text":"external link","title":"utils"},{"location":"api/metrics/#value_at_risk","text":"external link","title":"value_at_risk"},{"location":"api/omicron/","text":"Omicron\u63d0\u4f9b\u6570\u636e\u6301\u4e45\u5316\u3001\u65f6\u95f4\uff08\u65e5\u5386\u3001triggers)\u3001\u884c\u60c5\u6570\u636emodel\u3001\u57fa\u7840\u8fd0\u7b97\u548c\u57fa\u7840\u91cf\u5316\u56e0\u5b50 close () async \u00b6 \u5173\u95ed\u4e0e\u7f13\u5b58\u7684\u8fde\u63a5 Source code in omicron/__init__.py async def close (): \"\"\"\u5173\u95ed\u4e0e\u7f13\u5b58\u7684\u8fde\u63a5\"\"\" try : await cache . close () except Exception as e : # noqa pass init ( app_cache = 5 ) async \u00b6 \u521d\u59cb\u5316Omicron \u521d\u59cb\u5316influxDB, \u7f13\u5b58\u7b49\u8fde\u63a5\uff0c \u5e76\u52a0\u8f7d\u65e5\u5386\u548c\u8bc1\u5238\u5217\u8868 \u4e0a\u8ff0\u521d\u59cb\u5316\u7684\u8fde\u63a5\uff0c\u5e94\u8be5\u5728\u7a0b\u5e8f\u9000\u51fa\u65f6\uff0c\u901a\u8fc7\u8c03\u7528 close() \u5173\u95ed Source code in omicron/__init__.py async def init ( app_cache : int = 5 ): \"\"\"\u521d\u59cb\u5316Omicron \u521d\u59cb\u5316influxDB, \u7f13\u5b58\u7b49\u8fde\u63a5\uff0c \u5e76\u52a0\u8f7d\u65e5\u5386\u548c\u8bc1\u5238\u5217\u8868 \u4e0a\u8ff0\u521d\u59cb\u5316\u7684\u8fde\u63a5\uff0c\u5e94\u8be5\u5728\u7a0b\u5e8f\u9000\u51fa\u65f6\uff0c\u901a\u8fc7\u8c03\u7528`close()`\u5173\u95ed \"\"\" global cache await cache . init ( app = app_cache ) await tf . init () from omicron.models.security import Security await Security . init () Extensions package \u00b6 decimals \u00b6 math_round ( x , digits ) \u00b6 \u7531\u4e8e\u6d6e\u70b9\u6570\u7684\u8868\u793a\u95ee\u9898\uff0c\u5f88\u591a\u8bed\u8a00\u7684round\u51fd\u6570\u4e0e\u6570\u5b66\u4e0a\u7684round\u51fd\u6570\u4e0d\u4e00\u81f4\u3002\u4e0b\u9762\u7684\u51fd\u6570\u7ed3\u679c\u4e0e\u6570\u5b66\u4e0a\u7684\u4e00\u81f4\u3002 Parameters: Name Type Description Default x float \u8981\u8fdb\u884c\u56db\u820d\u4e94\u5165\u7684\u6570\u5b57 required digits int \u5c0f\u6570\u70b9\u540e\u4fdd\u7559\u7684\u4f4d\u6570 required Source code in omicron/extensions/decimals.py def math_round ( x : float , digits : int ): \"\"\"\u7531\u4e8e\u6d6e\u70b9\u6570\u7684\u8868\u793a\u95ee\u9898\uff0c\u5f88\u591a\u8bed\u8a00\u7684round\u51fd\u6570\u4e0e\u6570\u5b66\u4e0a\u7684round\u51fd\u6570\u4e0d\u4e00\u81f4\u3002\u4e0b\u9762\u7684\u51fd\u6570\u7ed3\u679c\u4e0e\u6570\u5b66\u4e0a\u7684\u4e00\u81f4\u3002 Args: x: \u8981\u8fdb\u884c\u56db\u820d\u4e94\u5165\u7684\u6570\u5b57 digits: \u5c0f\u6570\u70b9\u540e\u4fdd\u7559\u7684\u4f4d\u6570 \"\"\" return int ( x * ( 10 ** digits ) + copysign ( 0.5 , x )) / ( 10 ** digits ) price_equal ( x , y ) \u00b6 \u5224\u65ad\u80a1\u4ef7\u662f\u5426\u76f8\u7b49 Parameters: Name Type Description Default x \u4ef7\u683c1 required y \u4ef7\u683c2 required Returns: Type Description bool \u5982\u679c\u76f8\u7b49\u5219\u8fd4\u56deTrue\uff0c\u5426\u5219\u8fd4\u56deFalse Source code in omicron/extensions/decimals.py def price_equal ( x : float , y : float ) -> bool : \"\"\"\u5224\u65ad\u80a1\u4ef7\u662f\u5426\u76f8\u7b49 Args: x : \u4ef7\u683c1 y : \u4ef7\u683c2 Returns: \u5982\u679c\u76f8\u7b49\u5219\u8fd4\u56deTrue\uff0c\u5426\u5219\u8fd4\u56deFalse \"\"\" return abs ( math_round ( x , 2 ) - math_round ( y , 2 )) < 1e-2 np \u00b6 Extension function related to numpy array_math_round ( arr , digits ) \u00b6 \u5c06\u4e00\u7ef4\u6570\u7ec4arr\u7684\u6570\u636e\u8fdb\u884c\u56db\u820d\u4e94\u5165 
numpy.around\u7684\u51fd\u6570\u5e76\u4e0d\u662f\u6570\u5b66\u4e0a\u7684\u56db\u820d\u4e94\u5165\uff0c\u5bf91.5\u548c2.5\u8fdb\u884cround\u7684\u7ed3\u679c\u90fd\u4f1a\u53d8\u62102\uff0c\u5728\u91d1\u878d\u9886\u57df\u8ba1\u7b97\u4e2d\uff0c\u6211\u4eec\u5fc5\u987b\u4f7f\u7528\u6570\u5b66\u610f\u4e49\u4e0a\u7684\u56db\u820d\u4e94\u5165\u3002 Parameters: Name Type Description Default arr ArrayLike \u8f93\u5165\u6570\u7ec4 required digits int required Returns: Type Description np.ndarray \u56db\u820d\u4e94\u5165\u540e\u7684\u4e00\u7ef4\u6570\u7ec4 Source code in omicron/extensions/np.py def array_math_round ( arr : Union [ float , ArrayLike ], digits : int ) -> np . ndarray : \"\"\"\u5c06\u4e00\u7ef4\u6570\u7ec4arr\u7684\u6570\u636e\u8fdb\u884c\u56db\u820d\u4e94\u5165 numpy.around\u7684\u51fd\u6570\u5e76\u4e0d\u662f\u6570\u5b66\u4e0a\u7684\u56db\u820d\u4e94\u5165\uff0c\u5bf91.5\u548c2.5\u8fdb\u884cround\u7684\u7ed3\u679c\u90fd\u4f1a\u53d8\u62102\uff0c\u5728\u91d1\u878d\u9886\u57df\u8ba1\u7b97\u4e2d\uff0c\u6211\u4eec\u5fc5\u987b\u4f7f\u7528\u6570\u5b66\u610f\u4e49\u4e0a\u7684\u56db\u820d\u4e94\u5165\u3002 Args: arr (ArrayLike): \u8f93\u5165\u6570\u7ec4 digits (int): Returns: np.ndarray: \u56db\u820d\u4e94\u5165\u540e\u7684\u4e00\u7ef4\u6570\u7ec4 \"\"\" # \u5982\u679c\u662f\u5355\u4e2a\u5143\u7d20\uff0c\u5219\u76f4\u63a5\u8fd4\u56de if isinstance ( arr , float ): return decimals . math_round ( arr , digits ) f = np . vectorize ( lambda x : decimals . math_round ( x , digits )) return f ( arr ) array_price_equal ( price1 , price2 ) \u00b6 \u5224\u65ad\u4e24\u4e2a\u4ef7\u683c\u6570\u7ec4\u662f\u5426\u76f8\u7b49 Parameters: Name Type Description Default price1 ArrayLike \u4ef7\u683c\u6570\u7ec4 required price2 ArrayLike \u4ef7\u683c\u6570\u7ec4 required Returns: Type Description np.ndarray \u5224\u65ad\u7ed3\u679c Source code in omicron/extensions/np.py def array_price_equal ( price1 : ArrayLike , price2 : ArrayLike ) -> np . ndarray : \"\"\"\u5224\u65ad\u4e24\u4e2a\u4ef7\u683c\u6570\u7ec4\u662f\u5426\u76f8\u7b49 Args: price1 (ArrayLike): \u4ef7\u683c\u6570\u7ec4 price2 (ArrayLike): \u4ef7\u683c\u6570\u7ec4 Returns: np.ndarray: \u5224\u65ad\u7ed3\u679c \"\"\" price1 = array_math_round ( price1 , 2 ) price2 = array_math_round ( price2 , 2 ) return abs ( price1 - price2 ) < 1e-2 bars_since ( condition , default = None ) \u00b6 Return the number of bars since condition sequence was last True , or if never, return default . 1 2 3 >>> condition = [True, True, False] >>> bars_since(condition) 1 Source code in omicron/extensions/np.py def bars_since ( condition : Sequence [ bool ], default = None ) -> int : \"\"\" Return the number of bars since `condition` sequence was last `True`, or if never, return `default`. >>> condition = [True, True, False] >>> bars_since(condition) 1 \"\"\" return next ( compress ( range ( len ( condition )), reversed ( condition )), default ) bin_cut ( arr , n ) \u00b6 \u5c06\u6570\u7ec4arr\u5207\u5206\u6210n\u4efd todo: use padding + reshape to boost performance Parameters: Name Type Description Default arr [type] [description] required n [type] [description] required Returns: Type Description [type] [description] Source code in omicron/extensions/np.py def bin_cut ( arr : list , n : int ): \"\"\"\u5c06\u6570\u7ec4arr\u5207\u5206\u6210n\u4efd todo: use padding + reshape to boost performance Args: arr ([type]): [description] n ([type]): [description] Returns: [type]: [description] \"\"\" result = [[] for i in range ( n )] for i , e in enumerate ( arr ): result [ i % n ] . 
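A short sketch of the two vectorised helpers described above; the price values are illustrative only.

```python
# Sketch: mathematical rounding and 2-decimal price comparison on arrays.
import numpy as np
from omicron.extensions.np import array_math_round, array_price_equal

bid = np.array([10.284, 9.999])
ask = np.array([10.28, 10.0])

print(array_math_round(bid, 2))     # element-wise mathematical rounding to 2 decimals
print(array_price_equal(bid, ask))  # [ True  True]
```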
append ( e ) return [ e for e in result if len ( e )] count_between ( arr , start , end ) \u00b6 \u8ba1\u7b97\u6570\u7ec4\u4e2d\uff0c start \u5143\u7d20\u4e0e end \u5143\u7d20\u4e4b\u95f4\u5171\u6709\u591a\u5c11\u4e2a\u5143\u7d20 \u8981\u6c42arr\u5fc5\u987b\u662f\u5df2\u6392\u5e8f\u3002\u8ba1\u7b97\u7ed3\u679c\u4f1a\u5305\u542b\u533a\u95f4\u8fb9\u754c\u70b9\u3002 Examples: >>> arr = [ 20050104 , 20050105 , 20050106 , 20050107 , 20050110 , 20050111 ] >>> count_between ( arr , 20050104 , 20050111 ) 6 >>> count_between ( arr , 20050104 , 20050109 ) 4 Source code in omicron/extensions/np.py def count_between ( arr , start , end ): \"\"\"\u8ba1\u7b97\u6570\u7ec4\u4e2d\uff0c`start`\u5143\u7d20\u4e0e`end`\u5143\u7d20\u4e4b\u95f4\u5171\u6709\u591a\u5c11\u4e2a\u5143\u7d20 \u8981\u6c42arr\u5fc5\u987b\u662f\u5df2\u6392\u5e8f\u3002\u8ba1\u7b97\u7ed3\u679c\u4f1a\u5305\u542b\u533a\u95f4\u8fb9\u754c\u70b9\u3002 Examples: >>> arr = [20050104, 20050105, 20050106, 20050107, 20050110, 20050111] >>> count_between(arr, 20050104, 20050111) 6 >>> count_between(arr, 20050104, 20050109) 4 \"\"\" pos_start = np . searchsorted ( arr , start , side = \"right\" ) pos_end = np . searchsorted ( arr , end , side = \"right\" ) counter = pos_end - pos_start + 1 if start < arr [ 0 ]: counter -= 1 if end > arr [ - 1 ]: counter -= 1 return counter dataframe_to_structured_array ( df , dtypes = None ) \u00b6 convert dataframe (with all columns, and index possibly) to numpy structured arrays len(dtypes) should be either equal to len(df.columns) or len(df.columns) + 1 . In the later case, it implies to include df.index into converted array. Parameters: Name Type Description Default df DataFrame the one needs to be converted required dtypes List[Tuple] Defaults to None. If it's None , then dtypes of df is used, in such case, the index of df will not be converted. None Returns: Type Description ArrayLike [description] Source code in omicron/extensions/np.py def dataframe_to_structured_array ( df : DataFrame , dtypes : List [ Tuple ] = None ) -> ArrayLike : \"\"\"convert dataframe (with all columns, and index possibly) to numpy structured arrays `len(dtypes)` should be either equal to `len(df.columns)` or `len(df.columns) + 1`. In the later case, it implies to include `df.index` into converted array. Args: df: the one needs to be converted dtypes: Defaults to None. If it's `None`, then dtypes of `df` is used, in such case, the `index` of `df` will not be converted. Returns: ArrayLike: [description] \"\"\" v = df if dtypes is not None : dtypes_in_dict = { key : value for key , value in dtypes } col_len = len ( df . columns ) if len ( dtypes ) == col_len + 1 : v = df . reset_index () rename_index_to = set ( dtypes_in_dict . keys ()) . difference ( set ( df . columns )) v . rename ( columns = { \"index\" : list ( rename_index_to )[ 0 ]}, inplace = True ) elif col_len != len ( dtypes ): raise ValueError ( f \"length of dtypes should be either { col_len } or { col_len + 1 } , is { len ( dtypes ) } \" ) # re-arrange order of dtypes, in order to align with df.columns dtypes = [] for name in v . columns : dtypes . append (( name , dtypes_in_dict [ name ])) else : dtypes = df . dtypes return np . array ( np . rec . fromrecords ( v . values ), dtype = dtypes ) dict_to_numpy_array ( d , dtype ) \u00b6 convert dictionary to numpy array Examples: d = {\"aaron\": 5, \"jack\": 6} dtype = [(\"name\", \"S8\"), (\"score\", \" np . 
array : \"\"\"convert dictionary to numpy array Examples: >>> d = {\"aaron\": 5, \"jack\": 6} >>> dtype = [(\"name\", \"S8\"), (\"score\", \">> dict_to_numpy_array(d, dtype) array([(b'aaron', 5), (b'jack', 6)], dtype=[('name', 'S8'), ('score', '>> arr = np . arange ( 6 , dtype = np . float32 ) >>> arr [ 3 : 5 ] = np . NaN >>> fill_nan ( arr ) ... array ([ 0. , 1. , 2. , 2. , 2. , 5. ], dtype = float32 ) >>> arr = np . arange ( 6 , dtype = np . float32 ) >>> arr [ 0 : 2 ] = np . nan >>> fill_nan ( arr ) ... array ([ 2. , 2. , 2. , 3. , 4. , 5. ], dtype = float32 ) Parameters: Name Type Description Default ts np.array [description] required Source code in omicron/extensions/np.py def fill_nan ( ts : np . ndarray ): \"\"\"\u5c06ts\u4e2d\u7684NaN\u66ff\u6362\u4e3a\u5176\u524d\u503c \u5982\u679cts\u8d77\u5934\u7684\u5143\u7d20\u4e3aNaN\uff0c\u5219\u7528\u7b2c\u4e00\u4e2a\u975eNaN\u5143\u7d20\u66ff\u6362\u3002 \u5982\u679c\u6240\u6709\u5143\u7d20\u90fd\u4e3aNaN\uff0c\u5219\u65e0\u6cd5\u66ff\u6362\u3002 Example: >>> arr = np.arange(6, dtype=np.float32) >>> arr[3:5] = np.NaN >>> fill_nan(arr) ... # doctest: +NORMALIZE_WHITESPACE array([0., 1., 2., 2., 2., 5.], dtype=float32) >>> arr = np.arange(6, dtype=np.float32) >>> arr[0:2] = np.nan >>> fill_nan(arr) ... # doctest: +NORMALIZE_WHITESPACE array([2., 2., 2., 3., 4., 5.], dtype=float32) Args: ts (np.array): [description] \"\"\" if np . all ( np . isnan ( ts )): raise ValueError ( \"all of ts are NaN\" ) if ts [ 0 ] is None or math . isnan ( ts [ 0 ]): idx = np . argwhere ( ~ np . isnan ( ts ))[ 0 ] ts [ 0 ] = ts [ idx ] mask = np . isnan ( ts ) idx = np . where ( ~ mask , np . arange ( mask . size ), 0 ) np . maximum . accumulate ( idx , out = idx ) return ts [ idx ] find_runs ( x ) \u00b6 Find runs of consecutive items in an array. Parameters: Name Type Description Default x ArrayLike the sequence to find runs in required Returns: Type Description Tuple[np.ndarray, np.ndarray, np.ndarray] A tuple of unique values, start indices, and length of runs Source code in omicron/extensions/np.py def find_runs ( x : ArrayLike ) -> Tuple [ np . ndarray , np . ndarray , np . ndarray ]: \"\"\"Find runs of consecutive items in an array. Args: x: the sequence to find runs in Returns: A tuple of unique values, start indices, and length of runs \"\"\" # ensure array x = np . asanyarray ( x ) if x . ndim != 1 : raise ValueError ( \"only 1D array supported\" ) n = x . shape [ 0 ] # handle empty array if n == 0 : return np . array ([]), np . array ([]), np . array ([]) else : # find run starts loc_run_start = np . empty ( n , dtype = bool ) loc_run_start [ 0 ] = True np . not_equal ( x [: - 1 ], x [ 1 :], out = loc_run_start [ 1 :]) run_starts = np . nonzero ( loc_run_start )[ 0 ] # find run values run_values = x [ loc_run_start ] # find run lengths run_lengths = np . diff ( np . append ( run_starts , n )) return run_values , run_starts , run_lengths floor ( arr , item ) \u00b6 \u5728\u6570\u636earr\u4e2d\uff0c\u627e\u5230\u5c0f\u4e8e\u7b49\u4e8eitem\u7684\u90a3\u4e00\u4e2a\u503c\u3002\u5982\u679citem\u5c0f\u4e8e\u6240\u6709arr\u5143\u7d20\u7684\u503c\uff0c\u8fd4\u56dearr[0];\u5982\u679citem \u5927\u4e8e\u6240\u6709arr\u5143\u7d20\u7684\u503c\uff0c\u8fd4\u56dearr[-1] \u4e0e minute_frames_floor \u4e0d\u540c\u7684\u662f\uff0c\u672c\u51fd\u6570\u4e0d\u505a\u56de\u7ed5\u4e0e\u8fdb\u4f4d. 
Examples: >>> a = [ 3 , 6 , 9 ] >>> floor ( a , - 1 ) 3 >>> floor ( a , 9 ) 9 >>> floor ( a , 10 ) 9 >>> floor ( a , 4 ) 3 >>> floor ( a , 10 ) 9 Parameters: Name Type Description Default arr required item required Source code in omicron/extensions/np.py def floor ( arr , item ): \"\"\" \u5728\u6570\u636earr\u4e2d\uff0c\u627e\u5230\u5c0f\u4e8e\u7b49\u4e8eitem\u7684\u90a3\u4e00\u4e2a\u503c\u3002\u5982\u679citem\u5c0f\u4e8e\u6240\u6709arr\u5143\u7d20\u7684\u503c\uff0c\u8fd4\u56dearr[0];\u5982\u679citem \u5927\u4e8e\u6240\u6709arr\u5143\u7d20\u7684\u503c\uff0c\u8fd4\u56dearr[-1] \u4e0e`minute_frames_floor`\u4e0d\u540c\u7684\u662f\uff0c\u672c\u51fd\u6570\u4e0d\u505a\u56de\u7ed5\u4e0e\u8fdb\u4f4d. Examples: >>> a = [3, 6, 9] >>> floor(a, -1) 3 >>> floor(a, 9) 9 >>> floor(a, 10) 9 >>> floor(a, 4) 3 >>> floor(a,10) 9 Args: arr: item: Returns: \"\"\" if item < arr [ 0 ]: return arr [ 0 ] index = np . searchsorted ( arr , item , side = \"right\" ) return arr [ index - 1 ] join_by_left ( key , r1 , r2 , mask = True ) \u00b6 \u5de6\u8fde\u63a5 r1 , r2 by key \u5982\u679c r1 \u4e2d\u5b58\u5728 r2 \u4e2d\u6ca1\u6709\u7684\u884c\uff0c\u5219\u8be5\u884c\u5bf9\u5e94\u7684 r2 \u4e2d\u7684\u90a3\u4e9b\u5b57\u6bb5\u5c06\u88abmask\uff0c\u6216\u8005\u586b\u5145\u968f\u673a\u6570\u3002 same as numpy.lib.recfunctions.join_by(key, r1, r2, jointype='leftouter'), but allows r1 have duplicate keys Reference: stackoverflow Examples: >>> # to join the following >>> # [[ 1, 2], >>> # [ 1, 3], x [[1, 5], >>> # [ 2, 3]] [4, 7]] >>> # only first two rows in left will be joined >>> r1 = np . array ([( 1 , 2 ), ( 1 , 3 ), ( 2 , 3 )], dtype = [( 'seq' , 'i4' ), ( 'score' , 'i4' )]) >>> r2 = np . array ([( 1 , 5 ), ( 4 , 7 )], dtype = [( 'seq' , 'i4' ), ( 'age' , 'i4' )]) >>> joined = join_by_left ( 'seq' , r1 , r2 ) >>> print ( joined ) [( 1 , 2 , 5 ) ( 1 , 3 , 5 ) ( 2 , 3 , -- )] >>> print ( joined . dtype ) ( numpy . record , [( 'seq' , '>> joined [ 2 ][ 2 ] masked >>> joined . tolist ()[ 2 ][ 2 ] == None True Parameters: Name Type Description Default key join\u5173\u952e\u5b57 required r1 \u6570\u636e\u96c61 required r2 \u6570\u636e\u96c62 required Returns: Type Description a numpy array Source code in omicron/extensions/np.py def join_by_left ( key , r1 , r2 , mask = True ): \"\"\"\u5de6\u8fde\u63a5 `r1`, `r2` by `key` \u5982\u679c`r1`\u4e2d\u5b58\u5728`r2`\u4e2d\u6ca1\u6709\u7684\u884c\uff0c\u5219\u8be5\u884c\u5bf9\u5e94\u7684`r2`\u4e2d\u7684\u90a3\u4e9b\u5b57\u6bb5\u5c06\u88abmask\uff0c\u6216\u8005\u586b\u5145\u968f\u673a\u6570\u3002 same as numpy.lib.recfunctions.join_by(key, r1, r2, jointype='leftouter'), but allows r1 have duplicate keys [Reference: stackoverflow](https://stackoverflow.com/a/53261882/13395693) Examples: >>> # to join the following >>> # [[ 1, 2], >>> # [ 1, 3], x [[1, 5], >>> # [ 2, 3]] [4, 7]] >>> # only first two rows in left will be joined >>> r1 = np.array([(1, 2), (1,3), (2,3)], dtype=[('seq', 'i4'), ('score', 'i4')]) >>> r2 = np.array([(1, 5), (4,7)], dtype=[('seq', 'i4'), ('age', 'i4')]) >>> joined = join_by_left('seq', r1, r2) >>> print(joined) [(1, 2, 5) (1, 3, 5) (2, 3, --)] >>> print(joined.dtype) (numpy.record, [('seq', '>> joined[2][2] masked >>> joined.tolist()[2][2] == None True Args: key : join\u5173\u952e\u5b57 r1 : \u6570\u636e\u96c61 r2 : \u6570\u636e\u96c62 Returns: a numpy array \"\"\" # figure out the dtype of the result array descr1 = r1 . dtype . descr descr2 = [ d for d in r2 . dtype . descr if d [ 0 ] not in r1 . dtype . 
names ] descrm = descr1 + descr2 # figure out the fields we'll need from each array f1 = [ d [ 0 ] for d in descr1 ] f2 = [ d [ 0 ] for d in descr2 ] # cache the number of columns in f1 ncol1 = len ( f1 ) # get a dict of the rows of r2 grouped by key rows2 = {} for row2 in r2 : rows2 . setdefault ( row2 [ key ], []) . append ( row2 ) # figure out how many rows will be in the result nrowm = 0 for k1 in r1 [ key ]: if k1 in rows2 : nrowm += len ( rows2 [ k1 ]) else : nrowm += 1 # allocate the return array # ret = np.full((nrowm, ), fill, dtype=descrm) _ret = np . recarray ( nrowm , dtype = descrm ) if mask : ret = np . ma . array ( _ret , mask = True ) else : ret = _ret # merge the data into the return array i = 0 for row1 in r1 : if row1 [ key ] in rows2 : for row2 in rows2 [ row1 [ key ]]: ret [ i ] = tuple ( row1 [ f1 ]) + tuple ( row2 [ f2 ]) i += 1 else : for j in range ( ncol1 ): ret [ i ][ j ] = row1 [ j ] i += 1 return ret numpy_append_fields ( base , names , data , dtypes ) \u00b6 \u7ed9\u73b0\u6709\u7684\u6570\u7ec4 base \u589e\u52a0\u65b0\u7684\u5b57\u6bb5 \u5b9e\u73b0\u4e86 numpy.lib.recfunctions.rec_append_fields \u7684\u529f\u80fd\u3002\u63d0\u4f9b\u8fd9\u4e2a\u529f\u80fd\uff0c\u662f\u56e0\u4e3a rec_append_fields \u4e0d\u80fd\u5904\u7406 data \u5143\u7d20\u7684\u7c7b\u578b\u4e3aObject\u7684\u60c5\u51b5\u3002 \u65b0\u589e\u7684\u6570\u636e\u5217\u5c06\u987a\u5e8f\u6392\u5217\u5728\u5176\u5b83\u5217\u7684\u53f3\u8fb9\u3002 Examples: >>> # \u65b0\u589e\u5355\u4e2a\u5b57\u6bb5 >>> import numpy >>> old = np . array ([ i for i in range ( 3 )], dtype = [( 'col1' , '>> new_list = [ 2 * i for i in range ( 3 )] >>> res = numpy_append_fields ( old , 'new_col' , new_list , [( 'new_col' , '>> print ( res ) ... [( 0. , 0. ) ( 1. , 2. ) ( 2. , 4. )] >>> # \u65b0\u589e\u591a\u4e2a\u5b57\u6bb5 >>> data = [ res [ 'col1' ] . tolist (), res [ 'new_col' ] . tolist ()] >>> print ( numpy_append_fields ( old , ( 'col3' , 'col4' ), data , [( 'col3' , ' np . ndarray : \"\"\"\u7ed9\u73b0\u6709\u7684\u6570\u7ec4`base`\u589e\u52a0\u65b0\u7684\u5b57\u6bb5 \u5b9e\u73b0\u4e86`numpy.lib.recfunctions.rec_append_fields`\u7684\u529f\u80fd\u3002\u63d0\u4f9b\u8fd9\u4e2a\u529f\u80fd\uff0c\u662f\u56e0\u4e3a`rec_append_fields`\u4e0d\u80fd\u5904\u7406`data`\u5143\u7d20\u7684\u7c7b\u578b\u4e3aObject\u7684\u60c5\u51b5\u3002 \u65b0\u589e\u7684\u6570\u636e\u5217\u5c06\u987a\u5e8f\u6392\u5217\u5728\u5176\u5b83\u5217\u7684\u53f3\u8fb9\u3002 Example: >>> # \u65b0\u589e\u5355\u4e2a\u5b57\u6bb5 >>> import numpy >>> old = np.array([i for i in range(3)], dtype=[('col1', '>> new_list = [2 * i for i in range(3)] >>> res = numpy_append_fields(old, 'new_col', new_list, [('new_col', '>> print(res) ... # doctest: +NORMALIZE_WHITESPACE [(0., 0.) (1., 2.) (2., 4.)] >>> # \u65b0\u589e\u591a\u4e2a\u5b57\u6bb5 >>> data = [res['col1'].tolist(), res['new_col'].tolist()] >>> print(numpy_append_fields(old, ('col3', 'col4'), data, [('col3', ' np . ndarray : \"\"\"\u4ece`ts`\u4e2d\u53bb\u9664NaN Args: ts (np.array): [description] Returns: np.array: [description] \"\"\" return ts [ ~ np . isnan ( ts . astype ( float ))] replace_zero ( ts , replacement = None ) \u00b6 \u5c06ts\u4e2d\u76840\u66ff\u6362\u4e3a\u524d\u503c, \u5904\u7406volume\u6570\u636e\u65f6\u5e38\u7528\u7528\u5230 \u5982\u679c\u63d0\u4f9b\u4e86replacement, \u5219\u66ff\u6362\u4e3areplacement Source code in omicron/extensions/np.py def replace_zero ( ts : np . ndarray , replacement = None ) -> np . 
ndarray : \"\"\"\u5c06ts\u4e2d\u76840\u66ff\u6362\u4e3a\u524d\u503c, \u5904\u7406volume\u6570\u636e\u65f6\u5e38\u7528\u7528\u5230 \u5982\u679c\u63d0\u4f9b\u4e86replacement, \u5219\u66ff\u6362\u4e3areplacement \"\"\" if replacement is not None : return np . where ( ts == 0 , replacement , ts ) if np . all ( ts == 0 ): raise ValueError ( \"all of ts are 0\" ) if ts [ 0 ] == 0 : idx = np . argwhere ( ts != 0 )[ 0 ] ts [ 0 ] = ts [ idx ] mask = ts == 0 idx = np . where ( ~ mask , np . arange ( mask . size ), 0 ) np . maximum . accumulate ( idx , out = idx ) return ts [ idx ] rolling ( x , win , func ) \u00b6 \u5bf9\u5e8f\u5217 x \u8fdb\u884c\u7a97\u53e3\u6ed1\u52a8\u8ba1\u7b97\u3002 \u5982\u679c func \u8981\u5b9e\u73b0\u7684\u529f\u80fd\u662fargmax, argmin, max, mean, median, min, rank, std, sum, var\u7b49\uff0cmove_argmax\uff0c\u8bf7\u4f7f\u7528bottleneck\u4e2d\u7684move_argmin, move_max, move_mean, move_median, move_min move_rank, move_std, move_sum, move_var\u3002\u8fd9\u4e9b\u51fd\u6570\u7684\u6027\u80fd\u66f4\u597d\u3002 Parameters: Name Type Description Default x [type] [description] required win [type] [description] required func [type] [description] required Returns: Type Description [type] [description] Source code in omicron/extensions/np.py def rolling ( x , win , func ): \"\"\"\u5bf9\u5e8f\u5217`x`\u8fdb\u884c\u7a97\u53e3\u6ed1\u52a8\u8ba1\u7b97\u3002 \u5982\u679c`func`\u8981\u5b9e\u73b0\u7684\u529f\u80fd\u662fargmax, argmin, max, mean, median, min, rank, std, sum, var\u7b49\uff0cmove_argmax\uff0c\u8bf7\u4f7f\u7528bottleneck\u4e2d\u7684move_argmin, move_max, move_mean, move_median, move_min move_rank, move_std, move_sum, move_var\u3002\u8fd9\u4e9b\u51fd\u6570\u7684\u6027\u80fd\u66f4\u597d\u3002 Args: x ([type]): [description] win ([type]): [description] func ([type]): [description] Returns: [type]: [description] \"\"\" results = [] for subarray in sliding_window_view ( x , window_shape = win ): results . append ( func ( subarray )) return np . 
array ( results ) shift ( arr , start , offset ) \u00b6 \u5728numpy\u6570\u7ec4arr\u4e2d\uff0c\u627e\u5230start(\u6216\u8005\u6700\u63a5\u8fd1\u7684\u4e00\u4e2a\uff09\uff0c\u53d6offset\u5bf9\u5e94\u7684\u5143\u7d20\u3002 \u8981\u6c42 arr \u5df2\u6392\u5e8f\u3002 offset \u4e3a\u6b63\uff0c\u8868\u660e\u5411\u540e\u79fb\u4f4d\uff1b offset \u4e3a\u8d1f\uff0c\u8868\u660e\u5411\u524d\u79fb\u4f4d Examples: >>> arr = [ 20050104 , 20050105 , 20050106 , 20050107 , 20050110 , 20050111 ] >>> shift ( arr , 20050104 , 1 ) 20050105 >>> shift ( arr , 20050105 , - 1 ) 20050104 >>> # \u8d77\u59cb\u70b9\u5df2\u53f3\u8d8a\u754c\uff0c\u4e14\u5411\u53f3shift\uff0c\u8fd4\u56de\u8d77\u59cb\u70b9 >>> shift ( arr , 20050120 , 1 ) 20050120 Parameters: Name Type Description Default arr \u5df2\u6392\u5e8f\u7684\u6570\u7ec4 required start numpy\u53ef\u63a5\u53d7\u7684\u6570\u636e\u7c7b\u578b required offset int [description] required Returns: Type Description \u79fb\u4f4d\u540e\u5f97\u5230\u7684\u5143\u7d20\u503c Source code in omicron/extensions/np.py def shift ( arr , start , offset ): \"\"\"\u5728numpy\u6570\u7ec4arr\u4e2d\uff0c\u627e\u5230start(\u6216\u8005\u6700\u63a5\u8fd1\u7684\u4e00\u4e2a\uff09\uff0c\u53d6offset\u5bf9\u5e94\u7684\u5143\u7d20\u3002 \u8981\u6c42`arr`\u5df2\u6392\u5e8f\u3002`offset`\u4e3a\u6b63\uff0c\u8868\u660e\u5411\u540e\u79fb\u4f4d\uff1b`offset`\u4e3a\u8d1f\uff0c\u8868\u660e\u5411\u524d\u79fb\u4f4d Examples: >>> arr = [20050104, 20050105, 20050106, 20050107, 20050110, 20050111] >>> shift(arr, 20050104, 1) 20050105 >>> shift(arr, 20050105, -1) 20050104 >>> # \u8d77\u59cb\u70b9\u5df2\u53f3\u8d8a\u754c\uff0c\u4e14\u5411\u53f3shift\uff0c\u8fd4\u56de\u8d77\u59cb\u70b9 >>> shift(arr, 20050120, 1) 20050120 Args: arr : \u5df2\u6392\u5e8f\u7684\u6570\u7ec4 start : numpy\u53ef\u63a5\u53d7\u7684\u6570\u636e\u7c7b\u578b offset (int): [description] Returns: \u79fb\u4f4d\u540e\u5f97\u5230\u7684\u5143\u7d20\u503c \"\"\" pos = np . searchsorted ( arr , start , side = \"right\" ) if pos + offset - 1 >= len ( arr ): return start else : return arr [ pos + offset - 1 ] smallest_n_argpos ( ts , n ) \u00b6 get smallest n (min->max) elements and return argpos which its value ordered in ascent Examples: >>> smallest_n_argpos ([ np . nan , 4 , 3 , 9 , 8 , 5 , 2 , 1 , 0 , 6 , 7 ], 2 ) array ([ 8 , 7 ]) Parameters: Name Type Description Default ts np.array \u8f93\u5165\u7684\u6570\u7ec4 required n int \u53d6\u6700\u5c0f\u7684n\u4e2a\u5143\u7d20 required Returns: Type Description np.array [description] Source code in omicron/extensions/np.py def smallest_n_argpos ( ts : np . array , n : int ) -> np . array : \"\"\"get smallest n (min->max) elements and return argpos which its value ordered in ascent Example: >>> smallest_n_argpos([np.nan, 4, 3, 9, 8, 5, 2, 1, 0, 6, 7], 2) array([8, 7]) Args: ts (np.array): \u8f93\u5165\u7684\u6570\u7ec4 n (int): \u53d6\u6700\u5c0f\u7684n\u4e2a\u5143\u7d20 Returns: np.array: [description] \"\"\" return np . 
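The `rolling` docstring above recommends bottleneck's `move_*` functions for the common aggregations; `rolling` itself is for arbitrary window functions. A sketch with a custom aggregation:

```python
# Sketch: apply a custom aggregation over a sliding window of length 3.
import numpy as np
from omicron.extensions.np import rolling

x = np.arange(10, dtype=float)
print(rolling(x, 3, np.ptp))  # peak-to-peak of each 3-element window: eight values of 2.0
```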
argsort ( ts )[: n ] to_pydatetime ( tm ) \u00b6 \u5c06numpy.datetime64\u5bf9\u8c61\u8f6c\u6362\u6210\u4e3apython\u7684datetime\u5bf9\u8c61 numpy.ndarray.item()\u65b9\u6cd5\u53ef\u7528\u4ee5\u5c06\u4efb\u4f55numpy\u5bf9\u8c61\u8f6c\u6362\u6210python\u5bf9\u8c61\uff0c\u63a8\u8350\u5728\u4efb\u4f55\u9002\u7528\u7684\u5730\u65b9\u4f7f\u7528.item()\u65b9\u6cd5\uff0c\u800c\u4e0d\u662f\u672c\u65b9\u6cd5\u3002\u793a\u4f8b: 1 2 3 4 arr = np.array(['2022-09-08', '2022-09-09'], dtype='datetime64[s]') arr.item(0) # output is datetime.datetime(2022, 9, 8, 0, 0) arr[1].item() # output is datetime.datetime(2022, 9, 9, 0, 0) Parameters: Name Type Description Default tm the input numpy datetime object required Returns: Type Description datetime.datetime python datetime object .. deprecated:: 2.0.0 use tm.item() instead Source code in omicron/extensions/np.py @deprecated ( \"2.0.0\" , details = \"use `tm.item()` instead\" ) def to_pydatetime ( tm : np . datetime64 ) -> datetime . datetime : \"\"\"\u5c06numpy.datetime64\u5bf9\u8c61\u8f6c\u6362\u6210\u4e3apython\u7684datetime\u5bf9\u8c61 numpy.ndarray.item()\u65b9\u6cd5\u53ef\u7528\u4ee5\u5c06\u4efb\u4f55numpy\u5bf9\u8c61\u8f6c\u6362\u6210python\u5bf9\u8c61\uff0c\u63a8\u8350\u5728\u4efb\u4f55\u9002\u7528\u7684\u5730\u65b9\u4f7f\u7528.item()\u65b9\u6cd5\uff0c\u800c\u4e0d\u662f\u672c\u65b9\u6cd5\u3002\u793a\u4f8b: ``` arr = np.array(['2022-09-08', '2022-09-09'], dtype='datetime64[s]') arr.item(0) # output is datetime.datetime(2022, 9, 8, 0, 0) arr[1].item() # output is datetime.datetime(2022, 9, 9, 0, 0) ``` Args: tm : the input numpy datetime object Returns: python datetime object \"\"\" unix_epoch = np . datetime64 ( 0 , \"s\" ) one_second = np . timedelta64 ( 1 , \"s\" ) seconds_since_epoch = ( tm - unix_epoch ) / one_second return datetime . datetime . utcfromtimestamp ( seconds_since_epoch ) top_n_argpos ( ts , n ) \u00b6 get top n (max->min) elements and return argpos which its value ordered in descent Examples: >>> top_n_argpos ([ np . nan , 4 , 3 , 9 , 8 , 5 , 2 , 1 , 0 , 6 , 7 ], 2 ) array ([ 3 , 4 ]) Parameters: Name Type Description Default ts np.array [description] required n int [description] required Returns: Type Description np.array [description] Source code in omicron/extensions/np.py def top_n_argpos ( ts : np . array , n : int ) -> np . array : \"\"\"get top n (max->min) elements and return argpos which its value ordered in descent Example: >>> top_n_argpos([np.nan, 4, 3, 9, 8, 5, 2, 1, 0, 6, 7], 2) array([3, 4]) Args: ts (np.array): [description] n (int): [description] Returns: np.array: [description] \"\"\" ts_ = np . copy ( ts ) ts_ [ np . isnan ( ts_ )] = - np . inf return np . 
argsort ( ts_ )[ - n :][:: - 1 ] Notify package \u00b6 dingtalk \u00b6 DingTalkMessage \u00b6 \u9489\u9489\u7684\u673a\u5668\u4eba\u6d88\u606f\u63a8\u9001\u7c7b\uff0c\u5c01\u88c5\u4e86\u5e38\u7528\u7684\u6d88\u606f\u7c7b\u578b\u4ee5\u53ca\u52a0\u5bc6\u7b97\u6cd5 \u9700\u8981\u5728\u914d\u7f6e\u6587\u4ef6\u4e2d\u914d\u7f6e\u9489\u9489\u7684\u673a\u5668\u4eba\u7684access_token \u5982\u679c\u914d\u7f6e\u4e86\u52a0\u7b7e\uff0c\u9700\u8981\u5728\u914d\u7f6e\u6587\u4ef6\u4e2d\u914d\u7f6e\u9489\u9489\u7684\u673a\u5668\u4eba\u7684secret \u5982\u679c\u914d\u7f6e\u4e86\u81ea\u5b9a\u4e49\u5173\u952e\u8bcd\uff0c\u9700\u8981\u5728\u914d\u7f6e\u6587\u4ef6\u4e2d\u914d\u7f6e\u9489\u9489\u7684\u673a\u5668\u4eba\u7684keyword\uff0c\u591a\u4e2a\u5173\u952e\u8bcd\u7528\u82f1\u6587\u9017\u53f7\u5206\u9694 \u5168\u90e8\u7684\u914d\u7f6e\u6587\u4ef6\u793a\u4f8b\u5982\u4e0b, \u5176\u4e2dsecret\u548ckeyword\u53ef\u4ee5\u4e0d\u914d\u7f6e, access_token\u5fc5\u987b\u914d\u7f6e notify: dingtalk_access_token: xxxx dingtalk_secret: xxxx Source code in omicron/notify/dingtalk.py class DingTalkMessage : \"\"\" \u9489\u9489\u7684\u673a\u5668\u4eba\u6d88\u606f\u63a8\u9001\u7c7b\uff0c\u5c01\u88c5\u4e86\u5e38\u7528\u7684\u6d88\u606f\u7c7b\u578b\u4ee5\u53ca\u52a0\u5bc6\u7b97\u6cd5 \u9700\u8981\u5728\u914d\u7f6e\u6587\u4ef6\u4e2d\u914d\u7f6e\u9489\u9489\u7684\u673a\u5668\u4eba\u7684access_token \u5982\u679c\u914d\u7f6e\u4e86\u52a0\u7b7e\uff0c\u9700\u8981\u5728\u914d\u7f6e\u6587\u4ef6\u4e2d\u914d\u7f6e\u9489\u9489\u7684\u673a\u5668\u4eba\u7684secret \u5982\u679c\u914d\u7f6e\u4e86\u81ea\u5b9a\u4e49\u5173\u952e\u8bcd\uff0c\u9700\u8981\u5728\u914d\u7f6e\u6587\u4ef6\u4e2d\u914d\u7f6e\u9489\u9489\u7684\u673a\u5668\u4eba\u7684keyword\uff0c\u591a\u4e2a\u5173\u952e\u8bcd\u7528\u82f1\u6587\u9017\u53f7\u5206\u9694 \u5168\u90e8\u7684\u914d\u7f6e\u6587\u4ef6\u793a\u4f8b\u5982\u4e0b, \u5176\u4e2dsecret\u548ckeyword\u53ef\u4ee5\u4e0d\u914d\u7f6e, access_token\u5fc5\u987b\u914d\u7f6e notify: dingtalk_access_token: xxxx dingtalk_secret: xxxx \"\"\" url = \"https://oapi.dingtalk.com/robot/send\" @classmethod def _get_access_token ( cls ): \"\"\"\u83b7\u53d6\u9489\u9489\u673a\u5668\u4eba\u7684access_token\"\"\" if hasattr ( cfg . notify , \"dingtalk_access_token\" ): return cfg . notify . dingtalk_access_token else : logger . error ( \"Dingtalk not configured, please add the following items: \\n \" \"notify: \\n \" \" dingtalk_access_token: xxxx \\n \" \" dingtalk_secret: xxxx \\n \" ) raise ConfigError ( \"dingtalk_access_token not found\" ) @classmethod def _get_secret ( cls ): \"\"\"\u83b7\u53d6\u9489\u9489\u673a\u5668\u4eba\u7684secret\"\"\" if hasattr ( cfg . notify , \"dingtalk_secret\" ): return cfg . notify . dingtalk_secret else : return None @classmethod def _get_url ( cls ): \"\"\"\u83b7\u53d6\u9489\u9489\u673a\u5668\u4eba\u7684\u6d88\u606f\u63a8\u9001\u5730\u5740\uff0c\u5c06\u7b7e\u540d\u548c\u65f6\u95f4\u6233\u62fc\u63a5\u5728url\u540e\u9762\"\"\" access_token = cls . _get_access_token () url = f \" { cls . url } ?access_token= { access_token } \" secret = cls . _get_secret () if secret : timestamp , sign = cls . _get_sign ( secret ) url = f \" { url } ×tamp= { timestamp } &sign= { sign } \" return url @classmethod def _get_sign ( cls , secret : str ): \"\"\"\u83b7\u53d6\u7b7e\u540d\u53d1\u9001\u7ed9\u9489\u9489\u673a\u5668\u4eba\"\"\" timestamp = str ( round ( time . time () * 1000 )) secret_enc = secret . encode ( \"utf-8\" ) string_to_sign = \" {} \\n {} \" . format ( timestamp , secret ) string_to_sign_enc = string_to_sign . 
encode ( \"utf-8\" ) hmac_code = hmac . new ( secret_enc , string_to_sign_enc , digestmod = hashlib . sha256 ) . digest () sign = urllib . parse . quote_plus ( base64 . b64encode ( hmac_code )) return timestamp , sign @classmethod def _send ( cls , msg ): \"\"\"\u53d1\u9001\u6d88\u606f\u5230\u9489\u9489\u673a\u5668\u4eba\"\"\" url = cls . _get_url () response = httpx . post ( url , json = msg , timeout = 30 ) if response . status_code != 200 : logger . error ( f \"failed to send message, content: { msg } , response from Dingtalk: { response . content . decode () } \" ) return rsp = json . loads ( response . content ) if rsp . get ( \"errcode\" ) != 0 : logger . error ( f \"failed to send message, content: { msg } , response from Dingtalk: { rsp } \" ) return response . content . decode () @classmethod async def _send_async ( cls , msg ): \"\"\"\u53d1\u9001\u6d88\u606f\u5230\u9489\u9489\u673a\u5668\u4eba\"\"\" url = cls . _get_url () async with httpx . AsyncClient () as client : r = await client . post ( url , json = msg , timeout = 30 ) if r . status_code != 200 : logger . error ( f \"failed to send message, content: { msg } , response from Dingtalk: { r . content . decode () } \" ) return rsp = json . loads ( r . content ) if rsp . get ( \"errcode\" ) != 0 : logger . error ( f \"failed to send message, content: { msg } , response from Dingtalk: { rsp } \" ) return r . content . decode () @classmethod @deprecated ( \"2.0.0\" , details = \"use function `ding` instead\" ) def text ( cls , content ): msg = { \"text\" : { \"content\" : content }, \"msgtype\" : \"text\" } return cls . _send ( msg ) text ( cls , content ) classmethod \u00b6 .. deprecated:: 2.0.0 use function ding instead Source code in omicron/notify/dingtalk.py @classmethod @deprecated ( \"2.0.0\" , details = \"use function `ding` instead\" ) def text ( cls , content ): msg = { \"text\" : { \"content\" : content }, \"msgtype\" : \"text\" } return cls . 
_send ( msg ) ding ( msg ) \u00b6 \u53d1\u9001\u6d88\u606f\u5230\u9489\u9489\u673a\u5668\u4eba \u652f\u6301\u53d1\u9001\u7eaf\u6587\u672c\u6d88\u606f\u548cmarkdown\u683c\u5f0f\u7684\u6587\u672c\u6d88\u606f\u3002\u5982\u679c\u8981\u53d1\u9001markdown\u683c\u5f0f\u7684\u6d88\u606f\uff0c\u8bf7\u901a\u8fc7\u5b57\u5178\u4f20\u5165\uff0c\u5fc5\u987b\u5305\u542b\u5305\u542b\"title\"\u548c\"text\"\u4e24\u4e2a\u5b57\u6bb5\u3002\u66f4\u8be6\u7ec6\u4fe1\u606f\uff0c\u8bf7\u89c1 \u9489\u9489\u5f00\u653e\u5e73\u53f0\u6587\u6863 Important \u5fc5\u987b\u5728\u5f02\u6b65\u7ebf\u7a0b(\u5373\u8fd0\u884casyncio loop\u7684\u7ebf\u7a0b\uff09\u4e2d\u8c03\u7528\u6b64\u65b9\u6cd5\uff0c\u5426\u5219\u4f1a\u629b\u51fa\u5f02\u5e38\u3002 \u6b64\u65b9\u6cd5\u8fd4\u56de\u4e00\u4e2aAwaitable\uff0c\u60a8\u53ef\u4ee5\u7b49\u5f85\u5b83\u5b8c\u6210\uff0c\u4e5f\u53ef\u4ee5\u5ffd\u7565\u8fd4\u56de\u503c\uff0c\u6b64\u65f6\u5b83\u5c06\u4f5c\u4e3a\u4e00\u4e2a\u540e\u53f0\u4efb\u52a1\u6267\u884c\uff0c\u4f46\u5b8c\u6210\u7684\u65f6\u95f4\u4e0d\u786e\u5b9a\u3002 Parameters: Name Type Description Default msg Union[str, dict] \u5f85\u53d1\u9001\u6d88\u606f\u3002 required Returns: Type Description Awaitable \u53d1\u9001\u6d88\u606f\u7684\u540e\u53f0\u4efb\u52a1\u3002\u60a8\u53ef\u4ee5\u4f7f\u7528\u6b64\u8fd4\u56de\u53e5\u67c4\u6765\u53d6\u6d88\u4efb\u52a1\u3002 Source code in omicron/notify/dingtalk.py def ding ( msg : Union [ str , dict ]) -> Awaitable : \"\"\"\u53d1\u9001\u6d88\u606f\u5230\u9489\u9489\u673a\u5668\u4eba \u652f\u6301\u53d1\u9001\u7eaf\u6587\u672c\u6d88\u606f\u548cmarkdown\u683c\u5f0f\u7684\u6587\u672c\u6d88\u606f\u3002\u5982\u679c\u8981\u53d1\u9001markdown\u683c\u5f0f\u7684\u6d88\u606f\uff0c\u8bf7\u901a\u8fc7\u5b57\u5178\u4f20\u5165\uff0c\u5fc5\u987b\u5305\u542b\u5305\u542b\"title\"\u548c\"text\"\u4e24\u4e2a\u5b57\u6bb5\u3002\u66f4\u8be6\u7ec6\u4fe1\u606f\uff0c\u8bf7\u89c1[\u9489\u9489\u5f00\u653e\u5e73\u53f0\u6587\u6863](https://open.dingtalk.com/document/orgapp-server/message-type) ???+ Important \u5fc5\u987b\u5728\u5f02\u6b65\u7ebf\u7a0b(\u5373\u8fd0\u884casyncio loop\u7684\u7ebf\u7a0b\uff09\u4e2d\u8c03\u7528\u6b64\u65b9\u6cd5\uff0c\u5426\u5219\u4f1a\u629b\u51fa\u5f02\u5e38\u3002 \u6b64\u65b9\u6cd5\u8fd4\u56de\u4e00\u4e2aAwaitable\uff0c\u60a8\u53ef\u4ee5\u7b49\u5f85\u5b83\u5b8c\u6210\uff0c\u4e5f\u53ef\u4ee5\u5ffd\u7565\u8fd4\u56de\u503c\uff0c\u6b64\u65f6\u5b83\u5c06\u4f5c\u4e3a\u4e00\u4e2a\u540e\u53f0\u4efb\u52a1\u6267\u884c\uff0c\u4f46\u5b8c\u6210\u7684\u65f6\u95f4\u4e0d\u786e\u5b9a\u3002 Args: msg: \u5f85\u53d1\u9001\u6d88\u606f\u3002 Returns: \u53d1\u9001\u6d88\u606f\u7684\u540e\u53f0\u4efb\u52a1\u3002\u60a8\u53ef\u4ee5\u4f7f\u7528\u6b64\u8fd4\u56de\u53e5\u67c4\u6765\u53d6\u6d88\u4efb\u52a1\u3002 \"\"\" if isinstance ( msg , str ): msg_ = { \"text\" : { \"content\" : msg }, \"msgtype\" : \"text\" } elif isinstance ( msg , dict ): msg_ = { \"msgtype\" : \"markdown\" , \"markdown\" : { \"title\" : msg [ \"title\" ], \"text\" : msg [ \"text\" ]}, } else : raise TypeError task = asyncio . create_task ( DingTalkMessage . _send_async ( msg_ )) return task mail \u00b6 compose ( subject , plain_txt = None , html = None , attachment = None ) \u00b6 \u7f16\u5199MIME\u90ae\u4ef6\u3002 Parameters: Name Type Description Default subject str \u90ae\u4ef6\u4e3b\u9898 required plain_txt str \u7eaf\u6587\u672c\u683c\u5f0f\u7684\u90ae\u4ef6\u5185\u5bb9 None html str html\u683c\u5f0f\u7684\u90ae\u4ef6\u5185\u5bb9. Defaults to None. 
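A usage sketch for `ding()`. The message contents are made up; it requires `notify.dingtalk_access_token` in the cfg4py configuration and must run inside a thread with an asyncio event loop:

```python
# Sketch: send a plain-text and a markdown message through the DingTalk robot.
import asyncio
from omicron.notify.dingtalk import ding

async def notify():
    await ding("000001.XSHE filled at 10.25")  # plain text
    await ding({"title": "backtest report", "text": "**max drawdown**: 12%"})  # markdown
    # ding() returns an asyncio task, so awaiting it is optional; the handle can
    # also be dropped and the send completes in the background.

asyncio.run(notify())
```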
None attachment str \u9644\u4ef6\u6587\u4ef6\u540d None Returns: Type Description EmailMessage MIME mail Source code in omicron/notify/mail.py def compose ( subject : str , plain_txt : str = None , html : str = None , attachment : str = None ) -> EmailMessage : \"\"\"\u7f16\u5199MIME\u90ae\u4ef6\u3002 Args: subject (str): \u90ae\u4ef6\u4e3b\u9898 plain_txt (str): \u7eaf\u6587\u672c\u683c\u5f0f\u7684\u90ae\u4ef6\u5185\u5bb9 html (str, optional): html\u683c\u5f0f\u7684\u90ae\u4ef6\u5185\u5bb9. Defaults to None. attachment (str, optional): \u9644\u4ef6\u6587\u4ef6\u540d Returns: MIME mail \"\"\" msg = EmailMessage () msg [ \"Subject\" ] = subject if html : msg . preamble = plain_txt or \"\" msg . set_content ( html , subtype = \"html\" ) else : assert plain_txt , \"Either plain_txt or html is required.\" msg . set_content ( plain_txt ) if attachment : ctype , encoding = mimetypes . guess_type ( attachment ) if ctype is None or encoding is not None : ctype = \"application/octet-stream\" maintype , subtype = ctype . split ( \"/\" , 1 ) with open ( attachment , \"rb\" ) as f : msg . add_attachment ( f . read (), maintype = maintype , subtype = subtype , filename = attachment ) return msg mail_notify ( subject = None , body = None , msg = None , html = False , receivers = None ) \u00b6 \u53d1\u9001\u90ae\u4ef6\u901a\u77e5\u3002 \u53d1\u9001\u8005\u3001\u63a5\u6536\u8005\u53ca\u90ae\u4ef6\u670d\u52a1\u5668\u7b49\u914d\u7f6e\u8bf7\u901a\u8fc7cfg4py\u914d\u7f6e\uff1a 1 2 3 4 5 notify: mail_from: aaron_yang@jieyu.ai mail_to: - code@jieyu.ai mail_server: smtp.ym.163.com \u9a8c\u8bc1\u5bc6\u7801\u8bf7\u901a\u8fc7\u73af\u5883\u53d8\u91cf MAIL_PASSWORD \u6765\u914d\u7f6e\u3002 subject/body\u4e0emsg\u5fc5\u987b\u63d0\u4f9b\u5176\u4e00\u3002 Important \u5fc5\u987b\u5728\u5f02\u6b65\u7ebf\u7a0b(\u5373\u8fd0\u884casyncio loop\u7684\u7ebf\u7a0b\uff09\u4e2d\u8c03\u7528\u6b64\u65b9\u6cd5\uff0c\u5426\u5219\u4f1a\u629b\u51fa\u5f02\u5e38\u3002 \u6b64\u65b9\u6cd5\u8fd4\u56de\u4e00\u4e2aAwaitable\uff0c\u60a8\u53ef\u4ee5\u7b49\u5f85\u5b83\u5b8c\u6210\uff0c\u4e5f\u53ef\u4ee5\u5ffd\u7565\u8fd4\u56de\u503c\uff0c\u6b64\u65f6\u5b83\u5c06\u4f5c\u4e3a\u4e00\u4e2a\u540e\u53f0\u4efb\u52a1\u6267\u884c\uff0c\u4f46\u5b8c\u6210\u7684\u65f6\u95f4\u4e0d\u786e\u5b9a\u3002 Parameters: Name Type Description Default msg EmailMessage [description]. Defaults to None. None subject str [description]. Defaults to None. None body str [description]. Defaults to None. None html bool body\u662f\u5426\u6309html\u683c\u5f0f\u5904\u7406\uff1f Defaults to False. 
False receivers List[str], Optional \u63a5\u6536\u8005\u4fe1\u606f\u3002\u5982\u679c\u4e0d\u63d0\u4f9b\uff0c\u5c06\u4f7f\u7528\u9884\u5148\u914d\u7f6e\u7684\u63a5\u6536\u8005\u4fe1\u606f\u3002 None Returns: Type Description Awaitable \u53d1\u9001\u6d88\u606f\u7684\u540e\u53f0\u4efb\u52a1\u3002\u60a8\u53ef\u4ee5\u4f7f\u7528\u6b64\u8fd4\u56de\u53e5\u67c4\u6765\u53d6\u6d88\u4efb\u52a1\u3002 Source code in omicron/notify/mail.py def mail_notify ( subject : str = None , body : str = None , msg : EmailMessage = None , html = False , receivers = None , ) -> Awaitable : \"\"\"\u53d1\u9001\u90ae\u4ef6\u901a\u77e5\u3002 \u53d1\u9001\u8005\u3001\u63a5\u6536\u8005\u53ca\u90ae\u4ef6\u670d\u52a1\u5668\u7b49\u914d\u7f6e\u8bf7\u901a\u8fc7cfg4py\u914d\u7f6e\uff1a ``` notify: mail_from: aaron_yang@jieyu.ai mail_to: - code@jieyu.ai mail_server: smtp.ym.163.com ``` \u9a8c\u8bc1\u5bc6\u7801\u8bf7\u901a\u8fc7\u73af\u5883\u53d8\u91cf`MAIL_PASSWORD`\u6765\u914d\u7f6e\u3002 subject/body\u4e0emsg\u5fc5\u987b\u63d0\u4f9b\u5176\u4e00\u3002 ???+ Important \u5fc5\u987b\u5728\u5f02\u6b65\u7ebf\u7a0b(\u5373\u8fd0\u884casyncio loop\u7684\u7ebf\u7a0b\uff09\u4e2d\u8c03\u7528\u6b64\u65b9\u6cd5\uff0c\u5426\u5219\u4f1a\u629b\u51fa\u5f02\u5e38\u3002 \u6b64\u65b9\u6cd5\u8fd4\u56de\u4e00\u4e2aAwaitable\uff0c\u60a8\u53ef\u4ee5\u7b49\u5f85\u5b83\u5b8c\u6210\uff0c\u4e5f\u53ef\u4ee5\u5ffd\u7565\u8fd4\u56de\u503c\uff0c\u6b64\u65f6\u5b83\u5c06\u4f5c\u4e3a\u4e00\u4e2a\u540e\u53f0\u4efb\u52a1\u6267\u884c\uff0c\u4f46\u5b8c\u6210\u7684\u65f6\u95f4\u4e0d\u786e\u5b9a\u3002 Args: msg (EmailMessage, optional): [description]. Defaults to None. subject (str, optional): [description]. Defaults to None. body (str, optional): [description]. Defaults to None. html (bool, optional): body\u662f\u5426\u6309html\u683c\u5f0f\u5904\u7406\uff1f Defaults to False. receivers (List[str], Optional): \u63a5\u6536\u8005\u4fe1\u606f\u3002\u5982\u679c\u4e0d\u63d0\u4f9b\uff0c\u5c06\u4f7f\u7528\u9884\u5148\u914d\u7f6e\u7684\u63a5\u6536\u8005\u4fe1\u606f\u3002 Returns: \u53d1\u9001\u6d88\u606f\u7684\u540e\u53f0\u4efb\u52a1\u3002\u60a8\u53ef\u4ee5\u4f7f\u7528\u6b64\u8fd4\u56de\u53e5\u67c4\u6765\u53d6\u6d88\u4efb\u52a1\u3002 \"\"\" if all ([ msg is not None , subject or body ]): raise TypeError ( \"msg\u53c2\u6570\u4e0esubject/body\u53ea\u80fd\u63d0\u4f9b\u5176\u4e2d\u4e4b\u4e00\" ) elif all ([ msg is None , subject is None , body is None ]): raise TypeError ( \"\u5fc5\u987b\u63d0\u4f9bmsg\u53c2\u6570\u6216\u8005subjecdt/body\u53c2\u6570\" ) if msg is None : if html : msg = compose ( subject , html = body ) else : msg = compose ( subject , plain_txt = body ) cfg = cfg4py . get_instance () if not receivers : receivers = cfg . notify . mail_to password = os . environ . get ( \"MAIL_PASSWORD\" ) return send_mail ( cfg . notify . mail_from , receivers , password , msg , host = cfg . notify . 
mail_server ) send_mail ( sender , receivers , password , msg = None , host = None , port = 25 , cc = None , bcc = None , subject = None , body = None , username = None ) \u00b6 \u53d1\u9001\u90ae\u4ef6\u901a\u77e5\u3002 \u5982\u679c\u53ea\u53d1\u9001\u7b80\u5355\u7684\u6587\u672c\u90ae\u4ef6\uff0c\u8bf7\u4f7f\u7528 send_mail(sender, receivers, subject=subject, plain=plain)\u3002\u5982\u679c\u8981\u53d1\u9001\u8f83\u590d\u6742\u7684\u5e26html\u548c\u9644\u4ef6\u7684\u90ae\u4ef6\uff0c\u8bf7\u5148\u8c03\u7528compose()\u751f\u6210\u4e00\u4e2aEmailMessage,\u7136\u540e\u518d\u8c03\u7528send_mail(sender, receivers, msg)\u6765\u53d1\u9001\u90ae\u4ef6\u3002 Important \u5fc5\u987b\u5728\u5f02\u6b65\u7ebf\u7a0b(\u5373\u8fd0\u884casyncio loop\u7684\u7ebf\u7a0b\uff09\u4e2d\u8c03\u7528\u6b64\u65b9\u6cd5\uff0c\u5426\u5219\u4f1a\u629b\u51fa\u5f02\u5e38\u3002 \u6b64\u65b9\u6cd5\u8fd4\u56de\u4e00\u4e2aAwaitable\uff0c\u60a8\u53ef\u4ee5\u7b49\u5f85\u5b83\u5b8c\u6210\uff0c\u4e5f\u53ef\u4ee5\u5ffd\u7565\u8fd4\u56de\u503c\uff0c\u6b64\u65f6\u5b83\u5c06\u4f5c\u4e3a\u4e00\u4e2a\u540e\u53f0\u4efb\u52a1\u6267\u884c\uff0c\u4f46\u5b8c\u6210\u7684\u65f6\u95f4\u4e0d\u786e\u5b9a\u3002 Parameters: Name Type Description Default sender str [description] required receivers List[str] [description] required msg EmailMessage [description]. Defaults to None. None host str [description]. Defaults to None. None port int [description]. Defaults to 25. 25 cc List[str] [description]. Defaults to None. None bcc List[str] [description]. Defaults to None. None subject str [description]. Defaults to None. None plain str [description]. Defaults to None. required username str the username used to logon to mail server. if not provided, then sender is used. None Returns: Type Description Awaitable \u53d1\u9001\u6d88\u606f\u7684\u540e\u53f0\u4efb\u52a1\u3002\u60a8\u53ef\u4ee5\u4f7f\u7528\u6b64\u8fd4\u56de\u53e5\u67c4\u6765\u53d6\u6d88\u4efb\u52a1\u3002 Source code in omicron/notify/mail.py @retry ( aiosmtplib . errors . SMTPConnectError , tries = 3 , backoff = 2 , delay = 30 , logger = logger ) def send_mail ( sender : str , receivers : List [ str ], password : str , msg : EmailMessage = None , host : str = None , port : int = 25 , cc : List [ str ] = None , bcc : List [ str ] = None , subject : str = None , body : str = None , username : str = None , ) -> Awaitable : \"\"\"\u53d1\u9001\u90ae\u4ef6\u901a\u77e5\u3002 \u5982\u679c\u53ea\u53d1\u9001\u7b80\u5355\u7684\u6587\u672c\u90ae\u4ef6\uff0c\u8bf7\u4f7f\u7528 send_mail(sender, receivers, subject=subject, plain=plain)\u3002\u5982\u679c\u8981\u53d1\u9001\u8f83\u590d\u6742\u7684\u5e26html\u548c\u9644\u4ef6\u7684\u90ae\u4ef6\uff0c\u8bf7\u5148\u8c03\u7528compose()\u751f\u6210\u4e00\u4e2aEmailMessage,\u7136\u540e\u518d\u8c03\u7528send_mail(sender, receivers, msg)\u6765\u53d1\u9001\u90ae\u4ef6\u3002 ???+ Important \u5fc5\u987b\u5728\u5f02\u6b65\u7ebf\u7a0b(\u5373\u8fd0\u884casyncio loop\u7684\u7ebf\u7a0b\uff09\u4e2d\u8c03\u7528\u6b64\u65b9\u6cd5\uff0c\u5426\u5219\u4f1a\u629b\u51fa\u5f02\u5e38\u3002 \u6b64\u65b9\u6cd5\u8fd4\u56de\u4e00\u4e2aAwaitable\uff0c\u60a8\u53ef\u4ee5\u7b49\u5f85\u5b83\u5b8c\u6210\uff0c\u4e5f\u53ef\u4ee5\u5ffd\u7565\u8fd4\u56de\u503c\uff0c\u6b64\u65f6\u5b83\u5c06\u4f5c\u4e3a\u4e00\u4e2a\u540e\u53f0\u4efb\u52a1\u6267\u884c\uff0c\u4f46\u5b8c\u6210\u7684\u65f6\u95f4\u4e0d\u786e\u5b9a\u3002 Args: sender (str): [description] receivers (List[str]): [description] msg (EmailMessage, optional): [description]. Defaults to None. host (str, optional): [description]. Defaults to None. 
port (int, optional): [description]. Defaults to 25. cc (List[str], optional): [description]. Defaults to None. bcc (List[str], optional): [description]. Defaults to None. subject (str, optional): [description]. Defaults to None. plain (str, optional): [description]. Defaults to None. username (str, optional): the username used to logon to mail server. if not provided, then `sender` is used. Returns: \u53d1\u9001\u6d88\u606f\u7684\u540e\u53f0\u4efb\u52a1\u3002\u60a8\u53ef\u4ee5\u4f7f\u7528\u6b64\u8fd4\u56de\u53e5\u67c4\u6765\u53d6\u6d88\u4efb\u52a1\u3002 \"\"\" if all ([ msg is not None , subject is not None or body is not None ]): raise TypeError ( \"msg\u53c2\u6570\u4e0esubject/body\u53ea\u80fd\u63d0\u4f9b\u5176\u4e2d\u4e4b\u4e00\" ) elif all ([ msg is None , subject is None , body is None ]): raise TypeError ( \"\u5fc5\u987b\u63d0\u4f9bmsg\u53c2\u6570\u6216\u8005subjecdt/body\u53c2\u6570\" ) msg = msg or EmailMessage () if isinstance ( receivers , str ): receivers = [ receivers ] msg [ \"From\" ] = sender msg [ \"To\" ] = \", \" . join ( receivers ) if subject : msg [ \"subject\" ] = subject if body : msg . set_content ( body ) if cc : msg [ \"Cc\" ] = \", \" . join ( cc ) if bcc : msg [ \"Bcc\" ] = \", \" . join ( bcc ) username = username or sender if host is None : host = sender . split ( \"@\" )[ - 1 ] task = asyncio . create_task ( aiosmtplib . send ( msg , hostname = host , port = port , username = sender , password = password ) ) return task Backtesting Log Facility \u00b6 Info Since 2.0.0.a76 \u56de\u6d4b\u65f6\uff0c\u6253\u5370\u65f6\u95f4\u4e00\u822c\u8981\u6c42\u4e3a\u56de\u6d4b\u5f53\u65f6\u7684\u65f6\u95f4\uff0c\u800c\u975e\u7cfb\u7edf\u65f6\u95f4\u3002\u8fd9\u4e2a\u6a21\u5757\u63d0\u4f9b\u4e86\u6539\u5199\u65e5\u5fd7\u65f6\u95f4\u7684\u529f\u80fd\u3002 \u4f7f\u7528\u65b9\u6cd5\uff1a 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 from omicron.core.backtestlog import BacktestLogger logger = BacktestLogger . getLogger ( __name__ ) logger . setLevel ( logging . INFO ) handler = logging . StreamHandler () # \u901a\u8fc7bt_date\u57df\u6765\u8bbe\u7f6e\u65e5\u671f\uff0c\u800c\u4e0d\u662fasctime handler . setFormatter ( Formatter ( \" %(bt_date)s %(message)s \" )) logging . basicConfig ( level = logging . INFO , handlers = [ handler ]) # \u8c03\u7528\u65f6\u4e0e\u666e\u901a\u65e5\u5fd7\u4e00\u6837\uff0c\u4f46\u8981\u589e\u52a0\u4e00\u4e2adate\u53c2\u6570 logger . info ( \"this is info\" , date = datetime . date ( 2022 , 3 , 1 )) \u4e0a\u8ff0\u4ee3\u7801\u5c06\u8f93\u51fa\uff1a 1 2022-03-01 this is info \u4f7f\u7528\u672c\u65e5\u5fd7\u7684\u6838\u5fc3\u662f\u4e0a\u8ff0\u4ee3\u7801\u4e2d\u7684\u7b2c3\u884c\u548c\u7b2c9\u884c\uff0c\u6700\u540e\uff0c\u5728\u8f93\u51fa\u65e5\u5fd7\u65f6\u52a0\u4e0a date=... 
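A sketch for `mail_notify`. The subject and body are illustrative; it assumes `notify.mail_from`, `notify.mail_to` and `notify.mail_server` are configured via cfg4py, `MAIL_PASSWORD` is set in the environment, and the call happens in a thread running an asyncio loop:

```python
# Sketch: send an HTML notification mail as a background task.
import asyncio
from omicron.notify.mail import mail_notify

async def report():
    task = mail_notify(subject="backtest finished", body="<h1>max drawdown: 12%</h1>", html=True)
    await task  # or drop the handle and let it complete in the background

asyncio.run(report())
```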
\uff0c\u5982\u7b2c15\u884c\u6240\u793a\u3002 \u6ce8\u610f\u5728\u7b2c9\u884c\uff0c\u901a\u5e38\u662f logging.getLogger(__nam__) \uff0c\u800c\u8fd9\u91cc\u662f BacktestLogger.getLogger(__name__) \u5982\u679c\u4e0a\u8ff0\u8c03\u7528\u4e2d\u6ca1\u6709\u4f20\u5165 date \uff0c\u5219\u5c06\u4f7f\u7528\u8c03\u7528\u65f6\u95f4\uff0c\u6b64\u65f6\u884c\u4e3a\u8ddf\u539f\u65e5\u5fd7\u7cfb\u7edf\u4e00\u81f4\u3002 Warning \u5f53\u8c03\u7528logger.exception\u65f6\uff0c\u4e0d\u80fd\u4f20\u5165date\u53c2\u6570\u3002 \u914d\u7f6e\u6587\u4ef6\u793a\u4f8b \u00b6 \u5982\u679c\u8981\u901a\u8fc7\u914d\u7f6e\u6587\u4ef6\u6765\u914d\u7f6e\uff0c\u53ef\u4f7f\u7528\u4ee5\u4e0b\u793a\u4f8b\uff1a 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 formatters : backtest : format : '%(bt_date)s | %(message)s' handlers : backtest : class : logging.StreamHandler formatter : backtest omicron.base.strategy : level : INFO handlers : [ backtest ] propagate : false loggers : omicron.base.strategy : level : INFO handlers : [ backtest ] propagate : false","title":"omicron"},{"location":"api/omicron/#omicron.close","text":"\u5173\u95ed\u4e0e\u7f13\u5b58\u7684\u8fde\u63a5 Source code in omicron/__init__.py async def close (): \"\"\"\u5173\u95ed\u4e0e\u7f13\u5b58\u7684\u8fde\u63a5\"\"\" try : await cache . close () except Exception as e : # noqa pass","title":"close()"},{"location":"api/omicron/#omicron.init","text":"\u521d\u59cb\u5316Omicron \u521d\u59cb\u5316influxDB, \u7f13\u5b58\u7b49\u8fde\u63a5\uff0c \u5e76\u52a0\u8f7d\u65e5\u5386\u548c\u8bc1\u5238\u5217\u8868 \u4e0a\u8ff0\u521d\u59cb\u5316\u7684\u8fde\u63a5\uff0c\u5e94\u8be5\u5728\u7a0b\u5e8f\u9000\u51fa\u65f6\uff0c\u901a\u8fc7\u8c03\u7528 close() \u5173\u95ed Source code in omicron/__init__.py async def init ( app_cache : int = 5 ): \"\"\"\u521d\u59cb\u5316Omicron \u521d\u59cb\u5316influxDB, \u7f13\u5b58\u7b49\u8fde\u63a5\uff0c \u5e76\u52a0\u8f7d\u65e5\u5386\u548c\u8bc1\u5238\u5217\u8868 \u4e0a\u8ff0\u521d\u59cb\u5316\u7684\u8fde\u63a5\uff0c\u5e94\u8be5\u5728\u7a0b\u5e8f\u9000\u51fa\u65f6\uff0c\u901a\u8fc7\u8c03\u7528`close()`\u5173\u95ed \"\"\" global cache await cache . init ( app = app_cache ) await tf . init () from omicron.models.security import Security await Security . 
init ()","title":"init()"},{"location":"api/omicron/#extensions-package","text":"","title":"Extensions package"},{"location":"api/omicron/#omicron.extensions.decimals","text":"","title":"decimals"},{"location":"api/omicron/#omicron.extensions.decimals.math_round","text":"\u7531\u4e8e\u6d6e\u70b9\u6570\u7684\u8868\u793a\u95ee\u9898\uff0c\u5f88\u591a\u8bed\u8a00\u7684round\u51fd\u6570\u4e0e\u6570\u5b66\u4e0a\u7684round\u51fd\u6570\u4e0d\u4e00\u81f4\u3002\u4e0b\u9762\u7684\u51fd\u6570\u7ed3\u679c\u4e0e\u6570\u5b66\u4e0a\u7684\u4e00\u81f4\u3002 Parameters: Name Type Description Default x float \u8981\u8fdb\u884c\u56db\u820d\u4e94\u5165\u7684\u6570\u5b57 required digits int \u5c0f\u6570\u70b9\u540e\u4fdd\u7559\u7684\u4f4d\u6570 required Source code in omicron/extensions/decimals.py def math_round ( x : float , digits : int ): \"\"\"\u7531\u4e8e\u6d6e\u70b9\u6570\u7684\u8868\u793a\u95ee\u9898\uff0c\u5f88\u591a\u8bed\u8a00\u7684round\u51fd\u6570\u4e0e\u6570\u5b66\u4e0a\u7684round\u51fd\u6570\u4e0d\u4e00\u81f4\u3002\u4e0b\u9762\u7684\u51fd\u6570\u7ed3\u679c\u4e0e\u6570\u5b66\u4e0a\u7684\u4e00\u81f4\u3002 Args: x: \u8981\u8fdb\u884c\u56db\u820d\u4e94\u5165\u7684\u6570\u5b57 digits: \u5c0f\u6570\u70b9\u540e\u4fdd\u7559\u7684\u4f4d\u6570 \"\"\" return int ( x * ( 10 ** digits ) + copysign ( 0.5 , x )) / ( 10 ** digits )","title":"math_round()"},{"location":"api/omicron/#omicron.extensions.decimals.price_equal","text":"\u5224\u65ad\u80a1\u4ef7\u662f\u5426\u76f8\u7b49 Parameters: Name Type Description Default x \u4ef7\u683c1 required y \u4ef7\u683c2 required Returns: Type Description bool \u5982\u679c\u76f8\u7b49\u5219\u8fd4\u56deTrue\uff0c\u5426\u5219\u8fd4\u56deFalse Source code in omicron/extensions/decimals.py def price_equal ( x : float , y : float ) -> bool : \"\"\"\u5224\u65ad\u80a1\u4ef7\u662f\u5426\u76f8\u7b49 Args: x : \u4ef7\u683c1 y : \u4ef7\u683c2 Returns: \u5982\u679c\u76f8\u7b49\u5219\u8fd4\u56deTrue\uff0c\u5426\u5219\u8fd4\u56deFalse \"\"\" return abs ( math_round ( x , 2 ) - math_round ( y , 2 )) < 1e-2","title":"price_equal()"},{"location":"api/omicron/#omicron.extensions.np","text":"Extension function related to numpy","title":"np"},{"location":"api/omicron/#omicron.extensions.np.array_math_round","text":"\u5c06\u4e00\u7ef4\u6570\u7ec4arr\u7684\u6570\u636e\u8fdb\u884c\u56db\u820d\u4e94\u5165 numpy.around\u7684\u51fd\u6570\u5e76\u4e0d\u662f\u6570\u5b66\u4e0a\u7684\u56db\u820d\u4e94\u5165\uff0c\u5bf91.5\u548c2.5\u8fdb\u884cround\u7684\u7ed3\u679c\u90fd\u4f1a\u53d8\u62102\uff0c\u5728\u91d1\u878d\u9886\u57df\u8ba1\u7b97\u4e2d\uff0c\u6211\u4eec\u5fc5\u987b\u4f7f\u7528\u6570\u5b66\u610f\u4e49\u4e0a\u7684\u56db\u820d\u4e94\u5165\u3002 Parameters: Name Type Description Default arr ArrayLike \u8f93\u5165\u6570\u7ec4 required digits int required Returns: Type Description np.ndarray \u56db\u820d\u4e94\u5165\u540e\u7684\u4e00\u7ef4\u6570\u7ec4 Source code in omicron/extensions/np.py def array_math_round ( arr : Union [ float , ArrayLike ], digits : int ) -> np . 
ndarray : \"\"\"\u5c06\u4e00\u7ef4\u6570\u7ec4arr\u7684\u6570\u636e\u8fdb\u884c\u56db\u820d\u4e94\u5165 numpy.around\u7684\u51fd\u6570\u5e76\u4e0d\u662f\u6570\u5b66\u4e0a\u7684\u56db\u820d\u4e94\u5165\uff0c\u5bf91.5\u548c2.5\u8fdb\u884cround\u7684\u7ed3\u679c\u90fd\u4f1a\u53d8\u62102\uff0c\u5728\u91d1\u878d\u9886\u57df\u8ba1\u7b97\u4e2d\uff0c\u6211\u4eec\u5fc5\u987b\u4f7f\u7528\u6570\u5b66\u610f\u4e49\u4e0a\u7684\u56db\u820d\u4e94\u5165\u3002 Args: arr (ArrayLike): \u8f93\u5165\u6570\u7ec4 digits (int): Returns: np.ndarray: \u56db\u820d\u4e94\u5165\u540e\u7684\u4e00\u7ef4\u6570\u7ec4 \"\"\" # \u5982\u679c\u662f\u5355\u4e2a\u5143\u7d20\uff0c\u5219\u76f4\u63a5\u8fd4\u56de if isinstance ( arr , float ): return decimals . math_round ( arr , digits ) f = np . vectorize ( lambda x : decimals . math_round ( x , digits )) return f ( arr )","title":"array_math_round()"},{"location":"api/omicron/#omicron.extensions.np.array_price_equal","text":"\u5224\u65ad\u4e24\u4e2a\u4ef7\u683c\u6570\u7ec4\u662f\u5426\u76f8\u7b49 Parameters: Name Type Description Default price1 ArrayLike \u4ef7\u683c\u6570\u7ec4 required price2 ArrayLike \u4ef7\u683c\u6570\u7ec4 required Returns: Type Description np.ndarray \u5224\u65ad\u7ed3\u679c Source code in omicron/extensions/np.py def array_price_equal ( price1 : ArrayLike , price2 : ArrayLike ) -> np . ndarray : \"\"\"\u5224\u65ad\u4e24\u4e2a\u4ef7\u683c\u6570\u7ec4\u662f\u5426\u76f8\u7b49 Args: price1 (ArrayLike): \u4ef7\u683c\u6570\u7ec4 price2 (ArrayLike): \u4ef7\u683c\u6570\u7ec4 Returns: np.ndarray: \u5224\u65ad\u7ed3\u679c \"\"\" price1 = array_math_round ( price1 , 2 ) price2 = array_math_round ( price2 , 2 ) return abs ( price1 - price2 ) < 1e-2","title":"array_price_equal()"},{"location":"api/omicron/#omicron.extensions.np.bars_since","text":"Return the number of bars since condition sequence was last True , or if never, return default . 1 2 3 >>> condition = [True, True, False] >>> bars_since(condition) 1 Source code in omicron/extensions/np.py def bars_since ( condition : Sequence [ bool ], default = None ) -> int : \"\"\" Return the number of bars since `condition` sequence was last `True`, or if never, return `default`. >>> condition = [True, True, False] >>> bars_since(condition) 1 \"\"\" return next ( compress ( range ( len ( condition )), reversed ( condition )), default )","title":"bars_since()"},{"location":"api/omicron/#omicron.extensions.np.bin_cut","text":"\u5c06\u6570\u7ec4arr\u5207\u5206\u6210n\u4efd todo: use padding + reshape to boost performance Parameters: Name Type Description Default arr [type] [description] required n [type] [description] required Returns: Type Description [type] [description] Source code in omicron/extensions/np.py def bin_cut ( arr : list , n : int ): \"\"\"\u5c06\u6570\u7ec4arr\u5207\u5206\u6210n\u4efd todo: use padding + reshape to boost performance Args: arr ([type]): [description] n ([type]): [description] Returns: [type]: [description] \"\"\" result = [[] for i in range ( n )] for i , e in enumerate ( arr ): result [ i % n ] . 
append ( e ) return [ e for e in result if len ( e )]","title":"bin_cut()"},{"location":"api/omicron/#omicron.extensions.np.count_between","text":"\u8ba1\u7b97\u6570\u7ec4\u4e2d\uff0c start \u5143\u7d20\u4e0e end \u5143\u7d20\u4e4b\u95f4\u5171\u6709\u591a\u5c11\u4e2a\u5143\u7d20 \u8981\u6c42arr\u5fc5\u987b\u662f\u5df2\u6392\u5e8f\u3002\u8ba1\u7b97\u7ed3\u679c\u4f1a\u5305\u542b\u533a\u95f4\u8fb9\u754c\u70b9\u3002 Examples: >>> arr = [ 20050104 , 20050105 , 20050106 , 20050107 , 20050110 , 20050111 ] >>> count_between ( arr , 20050104 , 20050111 ) 6 >>> count_between ( arr , 20050104 , 20050109 ) 4 Source code in omicron/extensions/np.py def count_between ( arr , start , end ): \"\"\"\u8ba1\u7b97\u6570\u7ec4\u4e2d\uff0c`start`\u5143\u7d20\u4e0e`end`\u5143\u7d20\u4e4b\u95f4\u5171\u6709\u591a\u5c11\u4e2a\u5143\u7d20 \u8981\u6c42arr\u5fc5\u987b\u662f\u5df2\u6392\u5e8f\u3002\u8ba1\u7b97\u7ed3\u679c\u4f1a\u5305\u542b\u533a\u95f4\u8fb9\u754c\u70b9\u3002 Examples: >>> arr = [20050104, 20050105, 20050106, 20050107, 20050110, 20050111] >>> count_between(arr, 20050104, 20050111) 6 >>> count_between(arr, 20050104, 20050109) 4 \"\"\" pos_start = np . searchsorted ( arr , start , side = \"right\" ) pos_end = np . searchsorted ( arr , end , side = \"right\" ) counter = pos_end - pos_start + 1 if start < arr [ 0 ]: counter -= 1 if end > arr [ - 1 ]: counter -= 1 return counter","title":"count_between()"},{"location":"api/omicron/#omicron.extensions.np.dataframe_to_structured_array","text":"convert dataframe (with all columns, and index possibly) to numpy structured arrays len(dtypes) should be either equal to len(df.columns) or len(df.columns) + 1 . In the later case, it implies to include df.index into converted array. Parameters: Name Type Description Default df DataFrame the one needs to be converted required dtypes List[Tuple] Defaults to None. If it's None , then dtypes of df is used, in such case, the index of df will not be converted. None Returns: Type Description ArrayLike [description] Source code in omicron/extensions/np.py def dataframe_to_structured_array ( df : DataFrame , dtypes : List [ Tuple ] = None ) -> ArrayLike : \"\"\"convert dataframe (with all columns, and index possibly) to numpy structured arrays `len(dtypes)` should be either equal to `len(df.columns)` or `len(df.columns) + 1`. In the later case, it implies to include `df.index` into converted array. Args: df: the one needs to be converted dtypes: Defaults to None. If it's `None`, then dtypes of `df` is used, in such case, the `index` of `df` will not be converted. Returns: ArrayLike: [description] \"\"\" v = df if dtypes is not None : dtypes_in_dict = { key : value for key , value in dtypes } col_len = len ( df . columns ) if len ( dtypes ) == col_len + 1 : v = df . reset_index () rename_index_to = set ( dtypes_in_dict . keys ()) . difference ( set ( df . columns )) v . rename ( columns = { \"index\" : list ( rename_index_to )[ 0 ]}, inplace = True ) elif col_len != len ( dtypes ): raise ValueError ( f \"length of dtypes should be either { col_len } or { col_len + 1 } , is { len ( dtypes ) } \" ) # re-arrange order of dtypes, in order to align with df.columns dtypes = [] for name in v . columns : dtypes . append (( name , dtypes_in_dict [ name ])) else : dtypes = df . dtypes return np . array ( np . rec . fromrecords ( v . 
values ), dtype = dtypes )","title":"dataframe_to_structured_array()"},{"location":"api/omicron/#omicron.extensions.np.dict_to_numpy_array","text":"convert dictionary to numpy array Examples: d = {\"aaron\": 5, \"jack\": 6} dtype = [(\"name\", \"S8\"), (\"score\", \" np . array : \"\"\"convert dictionary to numpy array Examples: >>> d = {\"aaron\": 5, \"jack\": 6} >>> dtype = [(\"name\", \"S8\"), (\"score\", \">> dict_to_numpy_array(d, dtype) array([(b'aaron', 5), (b'jack', 6)], dtype=[('name', 'S8'), ('score', '>> arr = np . arange ( 6 , dtype = np . float32 ) >>> arr [ 3 : 5 ] = np . NaN >>> fill_nan ( arr ) ... array ([ 0. , 1. , 2. , 2. , 2. , 5. ], dtype = float32 ) >>> arr = np . arange ( 6 , dtype = np . float32 ) >>> arr [ 0 : 2 ] = np . nan >>> fill_nan ( arr ) ... array ([ 2. , 2. , 2. , 3. , 4. , 5. ], dtype = float32 ) Parameters: Name Type Description Default ts np.array [description] required Source code in omicron/extensions/np.py def fill_nan ( ts : np . ndarray ): \"\"\"\u5c06ts\u4e2d\u7684NaN\u66ff\u6362\u4e3a\u5176\u524d\u503c \u5982\u679cts\u8d77\u5934\u7684\u5143\u7d20\u4e3aNaN\uff0c\u5219\u7528\u7b2c\u4e00\u4e2a\u975eNaN\u5143\u7d20\u66ff\u6362\u3002 \u5982\u679c\u6240\u6709\u5143\u7d20\u90fd\u4e3aNaN\uff0c\u5219\u65e0\u6cd5\u66ff\u6362\u3002 Example: >>> arr = np.arange(6, dtype=np.float32) >>> arr[3:5] = np.NaN >>> fill_nan(arr) ... # doctest: +NORMALIZE_WHITESPACE array([0., 1., 2., 2., 2., 5.], dtype=float32) >>> arr = np.arange(6, dtype=np.float32) >>> arr[0:2] = np.nan >>> fill_nan(arr) ... # doctest: +NORMALIZE_WHITESPACE array([2., 2., 2., 3., 4., 5.], dtype=float32) Args: ts (np.array): [description] \"\"\" if np . all ( np . isnan ( ts )): raise ValueError ( \"all of ts are NaN\" ) if ts [ 0 ] is None or math . isnan ( ts [ 0 ]): idx = np . argwhere ( ~ np . isnan ( ts ))[ 0 ] ts [ 0 ] = ts [ idx ] mask = np . isnan ( ts ) idx = np . where ( ~ mask , np . arange ( mask . size ), 0 ) np . maximum . accumulate ( idx , out = idx ) return ts [ idx ]","title":"fill_nan()"},{"location":"api/omicron/#omicron.extensions.np.find_runs","text":"Find runs of consecutive items in an array. Parameters: Name Type Description Default x ArrayLike the sequence to find runs in required Returns: Type Description Tuple[np.ndarray, np.ndarray, np.ndarray] A tuple of unique values, start indices, and length of runs Source code in omicron/extensions/np.py def find_runs ( x : ArrayLike ) -> Tuple [ np . ndarray , np . ndarray , np . ndarray ]: \"\"\"Find runs of consecutive items in an array. Args: x: the sequence to find runs in Returns: A tuple of unique values, start indices, and length of runs \"\"\" # ensure array x = np . asanyarray ( x ) if x . ndim != 1 : raise ValueError ( \"only 1D array supported\" ) n = x . shape [ 0 ] # handle empty array if n == 0 : return np . array ([]), np . array ([]), np . array ([]) else : # find run starts loc_run_start = np . empty ( n , dtype = bool ) loc_run_start [ 0 ] = True np . not_equal ( x [: - 1 ], x [ 1 :], out = loc_run_start [ 1 :]) run_starts = np . nonzero ( loc_run_start )[ 0 ] # find run values run_values = x [ loc_run_start ] # find run lengths run_lengths = np . diff ( np . 
append ( run_starts , n )) return run_values , run_starts , run_lengths","title":"find_runs()"},{"location":"api/omicron/#omicron.extensions.np.floor","text":"\u5728\u6570\u636earr\u4e2d\uff0c\u627e\u5230\u5c0f\u4e8e\u7b49\u4e8eitem\u7684\u90a3\u4e00\u4e2a\u503c\u3002\u5982\u679citem\u5c0f\u4e8e\u6240\u6709arr\u5143\u7d20\u7684\u503c\uff0c\u8fd4\u56dearr[0];\u5982\u679citem \u5927\u4e8e\u6240\u6709arr\u5143\u7d20\u7684\u503c\uff0c\u8fd4\u56dearr[-1] \u4e0e minute_frames_floor \u4e0d\u540c\u7684\u662f\uff0c\u672c\u51fd\u6570\u4e0d\u505a\u56de\u7ed5\u4e0e\u8fdb\u4f4d. Examples: >>> a = [ 3 , 6 , 9 ] >>> floor ( a , - 1 ) 3 >>> floor ( a , 9 ) 9 >>> floor ( a , 10 ) 9 >>> floor ( a , 4 ) 3 >>> floor ( a , 10 ) 9 Parameters: Name Type Description Default arr required item required Source code in omicron/extensions/np.py def floor ( arr , item ): \"\"\" \u5728\u6570\u636earr\u4e2d\uff0c\u627e\u5230\u5c0f\u4e8e\u7b49\u4e8eitem\u7684\u90a3\u4e00\u4e2a\u503c\u3002\u5982\u679citem\u5c0f\u4e8e\u6240\u6709arr\u5143\u7d20\u7684\u503c\uff0c\u8fd4\u56dearr[0];\u5982\u679citem \u5927\u4e8e\u6240\u6709arr\u5143\u7d20\u7684\u503c\uff0c\u8fd4\u56dearr[-1] \u4e0e`minute_frames_floor`\u4e0d\u540c\u7684\u662f\uff0c\u672c\u51fd\u6570\u4e0d\u505a\u56de\u7ed5\u4e0e\u8fdb\u4f4d. Examples: >>> a = [3, 6, 9] >>> floor(a, -1) 3 >>> floor(a, 9) 9 >>> floor(a, 10) 9 >>> floor(a, 4) 3 >>> floor(a,10) 9 Args: arr: item: Returns: \"\"\" if item < arr [ 0 ]: return arr [ 0 ] index = np . searchsorted ( arr , item , side = \"right\" ) return arr [ index - 1 ]","title":"floor()"},{"location":"api/omicron/#omicron.extensions.np.join_by_left","text":"\u5de6\u8fde\u63a5 r1 , r2 by key \u5982\u679c r1 \u4e2d\u5b58\u5728 r2 \u4e2d\u6ca1\u6709\u7684\u884c\uff0c\u5219\u8be5\u884c\u5bf9\u5e94\u7684 r2 \u4e2d\u7684\u90a3\u4e9b\u5b57\u6bb5\u5c06\u88abmask\uff0c\u6216\u8005\u586b\u5145\u968f\u673a\u6570\u3002 same as numpy.lib.recfunctions.join_by(key, r1, r2, jointype='leftouter'), but allows r1 have duplicate keys Reference: stackoverflow Examples: >>> # to join the following >>> # [[ 1, 2], >>> # [ 1, 3], x [[1, 5], >>> # [ 2, 3]] [4, 7]] >>> # only first two rows in left will be joined >>> r1 = np . array ([( 1 , 2 ), ( 1 , 3 ), ( 2 , 3 )], dtype = [( 'seq' , 'i4' ), ( 'score' , 'i4' )]) >>> r2 = np . array ([( 1 , 5 ), ( 4 , 7 )], dtype = [( 'seq' , 'i4' ), ( 'age' , 'i4' )]) >>> joined = join_by_left ( 'seq' , r1 , r2 ) >>> print ( joined ) [( 1 , 2 , 5 ) ( 1 , 3 , 5 ) ( 2 , 3 , -- )] >>> print ( joined . dtype ) ( numpy . record , [( 'seq' , '>> joined [ 2 ][ 2 ] masked >>> joined . 
tolist ()[ 2 ][ 2 ] == None True Parameters: Name Type Description Default key join\u5173\u952e\u5b57 required r1 \u6570\u636e\u96c61 required r2 \u6570\u636e\u96c62 required Returns: Type Description a numpy array Source code in omicron/extensions/np.py def join_by_left ( key , r1 , r2 , mask = True ): \"\"\"\u5de6\u8fde\u63a5 `r1`, `r2` by `key` \u5982\u679c`r1`\u4e2d\u5b58\u5728`r2`\u4e2d\u6ca1\u6709\u7684\u884c\uff0c\u5219\u8be5\u884c\u5bf9\u5e94\u7684`r2`\u4e2d\u7684\u90a3\u4e9b\u5b57\u6bb5\u5c06\u88abmask\uff0c\u6216\u8005\u586b\u5145\u968f\u673a\u6570\u3002 same as numpy.lib.recfunctions.join_by(key, r1, r2, jointype='leftouter'), but allows r1 have duplicate keys [Reference: stackoverflow](https://stackoverflow.com/a/53261882/13395693) Examples: >>> # to join the following >>> # [[ 1, 2], >>> # [ 1, 3], x [[1, 5], >>> # [ 2, 3]] [4, 7]] >>> # only first two rows in left will be joined >>> r1 = np.array([(1, 2), (1,3), (2,3)], dtype=[('seq', 'i4'), ('score', 'i4')]) >>> r2 = np.array([(1, 5), (4,7)], dtype=[('seq', 'i4'), ('age', 'i4')]) >>> joined = join_by_left('seq', r1, r2) >>> print(joined) [(1, 2, 5) (1, 3, 5) (2, 3, --)] >>> print(joined.dtype) (numpy.record, [('seq', '>> joined[2][2] masked >>> joined.tolist()[2][2] == None True Args: key : join\u5173\u952e\u5b57 r1 : \u6570\u636e\u96c61 r2 : \u6570\u636e\u96c62 Returns: a numpy array \"\"\" # figure out the dtype of the result array descr1 = r1 . dtype . descr descr2 = [ d for d in r2 . dtype . descr if d [ 0 ] not in r1 . dtype . names ] descrm = descr1 + descr2 # figure out the fields we'll need from each array f1 = [ d [ 0 ] for d in descr1 ] f2 = [ d [ 0 ] for d in descr2 ] # cache the number of columns in f1 ncol1 = len ( f1 ) # get a dict of the rows of r2 grouped by key rows2 = {} for row2 in r2 : rows2 . setdefault ( row2 [ key ], []) . append ( row2 ) # figure out how many rows will be in the result nrowm = 0 for k1 in r1 [ key ]: if k1 in rows2 : nrowm += len ( rows2 [ k1 ]) else : nrowm += 1 # allocate the return array # ret = np.full((nrowm, ), fill, dtype=descrm) _ret = np . recarray ( nrowm , dtype = descrm ) if mask : ret = np . ma . array ( _ret , mask = True ) else : ret = _ret # merge the data into the return array i = 0 for row1 in r1 : if row1 [ key ] in rows2 : for row2 in rows2 [ row1 [ key ]]: ret [ i ] = tuple ( row1 [ f1 ]) + tuple ( row2 [ f2 ]) i += 1 else : for j in range ( ncol1 ): ret [ i ][ j ] = row1 [ j ] i += 1 return ret","title":"join_by_left()"},{"location":"api/omicron/#omicron.extensions.np.numpy_append_fields","text":"\u7ed9\u73b0\u6709\u7684\u6570\u7ec4 base \u589e\u52a0\u65b0\u7684\u5b57\u6bb5 \u5b9e\u73b0\u4e86 numpy.lib.recfunctions.rec_append_fields \u7684\u529f\u80fd\u3002\u63d0\u4f9b\u8fd9\u4e2a\u529f\u80fd\uff0c\u662f\u56e0\u4e3a rec_append_fields \u4e0d\u80fd\u5904\u7406 data \u5143\u7d20\u7684\u7c7b\u578b\u4e3aObject\u7684\u60c5\u51b5\u3002 \u65b0\u589e\u7684\u6570\u636e\u5217\u5c06\u987a\u5e8f\u6392\u5217\u5728\u5176\u5b83\u5217\u7684\u53f3\u8fb9\u3002 Examples: >>> # \u65b0\u589e\u5355\u4e2a\u5b57\u6bb5 >>> import numpy >>> old = np . array ([ i for i in range ( 3 )], dtype = [( 'col1' , '>> new_list = [ 2 * i for i in range ( 3 )] >>> res = numpy_append_fields ( old , 'new_col' , new_list , [( 'new_col' , '>> print ( res ) ... [( 0. , 0. ) ( 1. , 2. ) ( 2. , 4. )] >>> # \u65b0\u589e\u591a\u4e2a\u5b57\u6bb5 >>> data = [ res [ 'col1' ] . tolist (), res [ 'new_col' ] . tolist ()] >>> print ( numpy_append_fields ( old , ( 'col3' , 'col4' ), data , [( 'col3' , ' np . 
ndarray : \"\"\"\u7ed9\u73b0\u6709\u7684\u6570\u7ec4`base`\u589e\u52a0\u65b0\u7684\u5b57\u6bb5 \u5b9e\u73b0\u4e86`numpy.lib.recfunctions.rec_append_fields`\u7684\u529f\u80fd\u3002\u63d0\u4f9b\u8fd9\u4e2a\u529f\u80fd\uff0c\u662f\u56e0\u4e3a`rec_append_fields`\u4e0d\u80fd\u5904\u7406`data`\u5143\u7d20\u7684\u7c7b\u578b\u4e3aObject\u7684\u60c5\u51b5\u3002 \u65b0\u589e\u7684\u6570\u636e\u5217\u5c06\u987a\u5e8f\u6392\u5217\u5728\u5176\u5b83\u5217\u7684\u53f3\u8fb9\u3002 Example: >>> # \u65b0\u589e\u5355\u4e2a\u5b57\u6bb5 >>> import numpy >>> old = np.array([i for i in range(3)], dtype=[('col1', '>> new_list = [2 * i for i in range(3)] >>> res = numpy_append_fields(old, 'new_col', new_list, [('new_col', '>> print(res) ... # doctest: +NORMALIZE_WHITESPACE [(0., 0.) (1., 2.) (2., 4.)] >>> # \u65b0\u589e\u591a\u4e2a\u5b57\u6bb5 >>> data = [res['col1'].tolist(), res['new_col'].tolist()] >>> print(numpy_append_fields(old, ('col3', 'col4'), data, [('col3', ' np . ndarray : \"\"\"\u4ece`ts`\u4e2d\u53bb\u9664NaN Args: ts (np.array): [description] Returns: np.array: [description] \"\"\" return ts [ ~ np . isnan ( ts . astype ( float ))]","title":"remove_nan()"},{"location":"api/omicron/#omicron.extensions.np.replace_zero","text":"\u5c06ts\u4e2d\u76840\u66ff\u6362\u4e3a\u524d\u503c, \u5904\u7406volume\u6570\u636e\u65f6\u5e38\u7528\u7528\u5230 \u5982\u679c\u63d0\u4f9b\u4e86replacement, \u5219\u66ff\u6362\u4e3areplacement Source code in omicron/extensions/np.py def replace_zero ( ts : np . ndarray , replacement = None ) -> np . ndarray : \"\"\"\u5c06ts\u4e2d\u76840\u66ff\u6362\u4e3a\u524d\u503c, \u5904\u7406volume\u6570\u636e\u65f6\u5e38\u7528\u7528\u5230 \u5982\u679c\u63d0\u4f9b\u4e86replacement, \u5219\u66ff\u6362\u4e3areplacement \"\"\" if replacement is not None : return np . where ( ts == 0 , replacement , ts ) if np . all ( ts == 0 ): raise ValueError ( \"all of ts are 0\" ) if ts [ 0 ] == 0 : idx = np . argwhere ( ts != 0 )[ 0 ] ts [ 0 ] = ts [ idx ] mask = ts == 0 idx = np . where ( ~ mask , np . arange ( mask . size ), 0 ) np . maximum . accumulate ( idx , out = idx ) return ts [ idx ]","title":"replace_zero()"},{"location":"api/omicron/#omicron.extensions.np.rolling","text":"\u5bf9\u5e8f\u5217 x \u8fdb\u884c\u7a97\u53e3\u6ed1\u52a8\u8ba1\u7b97\u3002 \u5982\u679c func \u8981\u5b9e\u73b0\u7684\u529f\u80fd\u662fargmax, argmin, max, mean, median, min, rank, std, sum, var\u7b49\uff0cmove_argmax\uff0c\u8bf7\u4f7f\u7528bottleneck\u4e2d\u7684move_argmin, move_max, move_mean, move_median, move_min move_rank, move_std, move_sum, move_var\u3002\u8fd9\u4e9b\u51fd\u6570\u7684\u6027\u80fd\u66f4\u597d\u3002 Parameters: Name Type Description Default x [type] [description] required win [type] [description] required func [type] [description] required Returns: Type Description [type] [description] Source code in omicron/extensions/np.py def rolling ( x , win , func ): \"\"\"\u5bf9\u5e8f\u5217`x`\u8fdb\u884c\u7a97\u53e3\u6ed1\u52a8\u8ba1\u7b97\u3002 \u5982\u679c`func`\u8981\u5b9e\u73b0\u7684\u529f\u80fd\u662fargmax, argmin, max, mean, median, min, rank, std, sum, var\u7b49\uff0cmove_argmax\uff0c\u8bf7\u4f7f\u7528bottleneck\u4e2d\u7684move_argmin, move_max, move_mean, move_median, move_min move_rank, move_std, move_sum, move_var\u3002\u8fd9\u4e9b\u51fd\u6570\u7684\u6027\u80fd\u66f4\u597d\u3002 Args: x ([type]): [description] win ([type]): [description] func ([type]): [description] Returns: [type]: [description] \"\"\" results = [] for subarray in sliding_window_view ( x , window_shape = win ): results . 
append ( func ( subarray )) return np . array ( results )","title":"rolling()"},{"location":"api/omicron/#omicron.extensions.np.shift","text":"\u5728numpy\u6570\u7ec4arr\u4e2d\uff0c\u627e\u5230start(\u6216\u8005\u6700\u63a5\u8fd1\u7684\u4e00\u4e2a\uff09\uff0c\u53d6offset\u5bf9\u5e94\u7684\u5143\u7d20\u3002 \u8981\u6c42 arr \u5df2\u6392\u5e8f\u3002 offset \u4e3a\u6b63\uff0c\u8868\u660e\u5411\u540e\u79fb\u4f4d\uff1b offset \u4e3a\u8d1f\uff0c\u8868\u660e\u5411\u524d\u79fb\u4f4d Examples: >>> arr = [ 20050104 , 20050105 , 20050106 , 20050107 , 20050110 , 20050111 ] >>> shift ( arr , 20050104 , 1 ) 20050105 >>> shift ( arr , 20050105 , - 1 ) 20050104 >>> # \u8d77\u59cb\u70b9\u5df2\u53f3\u8d8a\u754c\uff0c\u4e14\u5411\u53f3shift\uff0c\u8fd4\u56de\u8d77\u59cb\u70b9 >>> shift ( arr , 20050120 , 1 ) 20050120 Parameters: Name Type Description Default arr \u5df2\u6392\u5e8f\u7684\u6570\u7ec4 required start numpy\u53ef\u63a5\u53d7\u7684\u6570\u636e\u7c7b\u578b required offset int [description] required Returns: Type Description \u79fb\u4f4d\u540e\u5f97\u5230\u7684\u5143\u7d20\u503c Source code in omicron/extensions/np.py def shift ( arr , start , offset ): \"\"\"\u5728numpy\u6570\u7ec4arr\u4e2d\uff0c\u627e\u5230start(\u6216\u8005\u6700\u63a5\u8fd1\u7684\u4e00\u4e2a\uff09\uff0c\u53d6offset\u5bf9\u5e94\u7684\u5143\u7d20\u3002 \u8981\u6c42`arr`\u5df2\u6392\u5e8f\u3002`offset`\u4e3a\u6b63\uff0c\u8868\u660e\u5411\u540e\u79fb\u4f4d\uff1b`offset`\u4e3a\u8d1f\uff0c\u8868\u660e\u5411\u524d\u79fb\u4f4d Examples: >>> arr = [20050104, 20050105, 20050106, 20050107, 20050110, 20050111] >>> shift(arr, 20050104, 1) 20050105 >>> shift(arr, 20050105, -1) 20050104 >>> # \u8d77\u59cb\u70b9\u5df2\u53f3\u8d8a\u754c\uff0c\u4e14\u5411\u53f3shift\uff0c\u8fd4\u56de\u8d77\u59cb\u70b9 >>> shift(arr, 20050120, 1) 20050120 Args: arr : \u5df2\u6392\u5e8f\u7684\u6570\u7ec4 start : numpy\u53ef\u63a5\u53d7\u7684\u6570\u636e\u7c7b\u578b offset (int): [description] Returns: \u79fb\u4f4d\u540e\u5f97\u5230\u7684\u5143\u7d20\u503c \"\"\" pos = np . searchsorted ( arr , start , side = \"right\" ) if pos + offset - 1 >= len ( arr ): return start else : return arr [ pos + offset - 1 ]","title":"shift()"},{"location":"api/omicron/#omicron.extensions.np.smallest_n_argpos","text":"get smallest n (min->max) elements and return argpos which its value ordered in ascent Examples: >>> smallest_n_argpos ([ np . nan , 4 , 3 , 9 , 8 , 5 , 2 , 1 , 0 , 6 , 7 ], 2 ) array ([ 8 , 7 ]) Parameters: Name Type Description Default ts np.array \u8f93\u5165\u7684\u6570\u7ec4 required n int \u53d6\u6700\u5c0f\u7684n\u4e2a\u5143\u7d20 required Returns: Type Description np.array [description] Source code in omicron/extensions/np.py def smallest_n_argpos ( ts : np . array , n : int ) -> np . array : \"\"\"get smallest n (min->max) elements and return argpos which its value ordered in ascent Example: >>> smallest_n_argpos([np.nan, 4, 3, 9, 8, 5, 2, 1, 0, 6, 7], 2) array([8, 7]) Args: ts (np.array): \u8f93\u5165\u7684\u6570\u7ec4 n (int): \u53d6\u6700\u5c0f\u7684n\u4e2a\u5143\u7d20 Returns: np.array: [description] \"\"\" return np . 
argsort ( ts )[: n ]","title":"smallest_n_argpos()"},{"location":"api/omicron/#omicron.extensions.np.to_pydatetime","text":"\u5c06numpy.datetime64\u5bf9\u8c61\u8f6c\u6362\u6210\u4e3apython\u7684datetime\u5bf9\u8c61 numpy.ndarray.item()\u65b9\u6cd5\u53ef\u7528\u4ee5\u5c06\u4efb\u4f55numpy\u5bf9\u8c61\u8f6c\u6362\u6210python\u5bf9\u8c61\uff0c\u63a8\u8350\u5728\u4efb\u4f55\u9002\u7528\u7684\u5730\u65b9\u4f7f\u7528.item()\u65b9\u6cd5\uff0c\u800c\u4e0d\u662f\u672c\u65b9\u6cd5\u3002\u793a\u4f8b: 1 2 3 4 arr = np.array(['2022-09-08', '2022-09-09'], dtype='datetime64[s]') arr.item(0) # output is datetime.datetime(2022, 9, 8, 0, 0) arr[1].item() # output is datetime.datetime(2022, 9, 9, 0, 0) Parameters: Name Type Description Default tm the input numpy datetime object required Returns: Type Description datetime.datetime python datetime object .. deprecated:: 2.0.0 use tm.item() instead Source code in omicron/extensions/np.py @deprecated ( \"2.0.0\" , details = \"use `tm.item()` instead\" ) def to_pydatetime ( tm : np . datetime64 ) -> datetime . datetime : \"\"\"\u5c06numpy.datetime64\u5bf9\u8c61\u8f6c\u6362\u6210\u4e3apython\u7684datetime\u5bf9\u8c61 numpy.ndarray.item()\u65b9\u6cd5\u53ef\u7528\u4ee5\u5c06\u4efb\u4f55numpy\u5bf9\u8c61\u8f6c\u6362\u6210python\u5bf9\u8c61\uff0c\u63a8\u8350\u5728\u4efb\u4f55\u9002\u7528\u7684\u5730\u65b9\u4f7f\u7528.item()\u65b9\u6cd5\uff0c\u800c\u4e0d\u662f\u672c\u65b9\u6cd5\u3002\u793a\u4f8b: ``` arr = np.array(['2022-09-08', '2022-09-09'], dtype='datetime64[s]') arr.item(0) # output is datetime.datetime(2022, 9, 8, 0, 0) arr[1].item() # output is datetime.datetime(2022, 9, 9, 0, 0) ``` Args: tm : the input numpy datetime object Returns: python datetime object \"\"\" unix_epoch = np . datetime64 ( 0 , \"s\" ) one_second = np . timedelta64 ( 1 , \"s\" ) seconds_since_epoch = ( tm - unix_epoch ) / one_second return datetime . datetime . utcfromtimestamp ( seconds_since_epoch )","title":"to_pydatetime()"},{"location":"api/omicron/#omicron.extensions.np.top_n_argpos","text":"get top n (max->min) elements and return argpos which its value ordered in descent Examples: >>> top_n_argpos ([ np . nan , 4 , 3 , 9 , 8 , 5 , 2 , 1 , 0 , 6 , 7 ], 2 ) array ([ 3 , 4 ]) Parameters: Name Type Description Default ts np.array [description] required n int [description] required Returns: Type Description np.array [description] Source code in omicron/extensions/np.py def top_n_argpos ( ts : np . array , n : int ) -> np . array : \"\"\"get top n (max->min) elements and return argpos which its value ordered in descent Example: >>> top_n_argpos([np.nan, 4, 3, 9, 8, 5, 2, 1, 0, 6, 7], 2) array([3, 4]) Args: ts (np.array): [description] n (int): [description] Returns: np.array: [description] \"\"\" ts_ = np . copy ( ts ) ts_ [ np . isnan ( ts_ )] = - np . inf return np . 
argsort ( ts_ )[ - n :][:: - 1 ]","title":"top_n_argpos()"},{"location":"api/omicron/#notify-package","text":"","title":"Notify package"},{"location":"api/omicron/#omicron.notify.dingtalk","text":"","title":"dingtalk"},{"location":"api/omicron/#omicron.notify.dingtalk.DingTalkMessage","text":"\u9489\u9489\u7684\u673a\u5668\u4eba\u6d88\u606f\u63a8\u9001\u7c7b\uff0c\u5c01\u88c5\u4e86\u5e38\u7528\u7684\u6d88\u606f\u7c7b\u578b\u4ee5\u53ca\u52a0\u5bc6\u7b97\u6cd5 \u9700\u8981\u5728\u914d\u7f6e\u6587\u4ef6\u4e2d\u914d\u7f6e\u9489\u9489\u7684\u673a\u5668\u4eba\u7684access_token \u5982\u679c\u914d\u7f6e\u4e86\u52a0\u7b7e\uff0c\u9700\u8981\u5728\u914d\u7f6e\u6587\u4ef6\u4e2d\u914d\u7f6e\u9489\u9489\u7684\u673a\u5668\u4eba\u7684secret \u5982\u679c\u914d\u7f6e\u4e86\u81ea\u5b9a\u4e49\u5173\u952e\u8bcd\uff0c\u9700\u8981\u5728\u914d\u7f6e\u6587\u4ef6\u4e2d\u914d\u7f6e\u9489\u9489\u7684\u673a\u5668\u4eba\u7684keyword\uff0c\u591a\u4e2a\u5173\u952e\u8bcd\u7528\u82f1\u6587\u9017\u53f7\u5206\u9694 \u5168\u90e8\u7684\u914d\u7f6e\u6587\u4ef6\u793a\u4f8b\u5982\u4e0b, \u5176\u4e2dsecret\u548ckeyword\u53ef\u4ee5\u4e0d\u914d\u7f6e, access_token\u5fc5\u987b\u914d\u7f6e notify: dingtalk_access_token: xxxx dingtalk_secret: xxxx Source code in omicron/notify/dingtalk.py class DingTalkMessage : \"\"\" \u9489\u9489\u7684\u673a\u5668\u4eba\u6d88\u606f\u63a8\u9001\u7c7b\uff0c\u5c01\u88c5\u4e86\u5e38\u7528\u7684\u6d88\u606f\u7c7b\u578b\u4ee5\u53ca\u52a0\u5bc6\u7b97\u6cd5 \u9700\u8981\u5728\u914d\u7f6e\u6587\u4ef6\u4e2d\u914d\u7f6e\u9489\u9489\u7684\u673a\u5668\u4eba\u7684access_token \u5982\u679c\u914d\u7f6e\u4e86\u52a0\u7b7e\uff0c\u9700\u8981\u5728\u914d\u7f6e\u6587\u4ef6\u4e2d\u914d\u7f6e\u9489\u9489\u7684\u673a\u5668\u4eba\u7684secret \u5982\u679c\u914d\u7f6e\u4e86\u81ea\u5b9a\u4e49\u5173\u952e\u8bcd\uff0c\u9700\u8981\u5728\u914d\u7f6e\u6587\u4ef6\u4e2d\u914d\u7f6e\u9489\u9489\u7684\u673a\u5668\u4eba\u7684keyword\uff0c\u591a\u4e2a\u5173\u952e\u8bcd\u7528\u82f1\u6587\u9017\u53f7\u5206\u9694 \u5168\u90e8\u7684\u914d\u7f6e\u6587\u4ef6\u793a\u4f8b\u5982\u4e0b, \u5176\u4e2dsecret\u548ckeyword\u53ef\u4ee5\u4e0d\u914d\u7f6e, access_token\u5fc5\u987b\u914d\u7f6e notify: dingtalk_access_token: xxxx dingtalk_secret: xxxx \"\"\" url = \"https://oapi.dingtalk.com/robot/send\" @classmethod def _get_access_token ( cls ): \"\"\"\u83b7\u53d6\u9489\u9489\u673a\u5668\u4eba\u7684access_token\"\"\" if hasattr ( cfg . notify , \"dingtalk_access_token\" ): return cfg . notify . dingtalk_access_token else : logger . error ( \"Dingtalk not configured, please add the following items: \\n \" \"notify: \\n \" \" dingtalk_access_token: xxxx \\n \" \" dingtalk_secret: xxxx \\n \" ) raise ConfigError ( \"dingtalk_access_token not found\" ) @classmethod def _get_secret ( cls ): \"\"\"\u83b7\u53d6\u9489\u9489\u673a\u5668\u4eba\u7684secret\"\"\" if hasattr ( cfg . notify , \"dingtalk_secret\" ): return cfg . notify . dingtalk_secret else : return None @classmethod def _get_url ( cls ): \"\"\"\u83b7\u53d6\u9489\u9489\u673a\u5668\u4eba\u7684\u6d88\u606f\u63a8\u9001\u5730\u5740\uff0c\u5c06\u7b7e\u540d\u548c\u65f6\u95f4\u6233\u62fc\u63a5\u5728url\u540e\u9762\"\"\" access_token = cls . _get_access_token () url = f \" { cls . url } ?access_token= { access_token } \" secret = cls . _get_secret () if secret : timestamp , sign = cls . 
_get_sign ( secret ) url = f \" { url } ×tamp= { timestamp } &sign= { sign } \" return url @classmethod def _get_sign ( cls , secret : str ): \"\"\"\u83b7\u53d6\u7b7e\u540d\u53d1\u9001\u7ed9\u9489\u9489\u673a\u5668\u4eba\"\"\" timestamp = str ( round ( time . time () * 1000 )) secret_enc = secret . encode ( \"utf-8\" ) string_to_sign = \" {} \\n {} \" . format ( timestamp , secret ) string_to_sign_enc = string_to_sign . encode ( \"utf-8\" ) hmac_code = hmac . new ( secret_enc , string_to_sign_enc , digestmod = hashlib . sha256 ) . digest () sign = urllib . parse . quote_plus ( base64 . b64encode ( hmac_code )) return timestamp , sign @classmethod def _send ( cls , msg ): \"\"\"\u53d1\u9001\u6d88\u606f\u5230\u9489\u9489\u673a\u5668\u4eba\"\"\" url = cls . _get_url () response = httpx . post ( url , json = msg , timeout = 30 ) if response . status_code != 200 : logger . error ( f \"failed to send message, content: { msg } , response from Dingtalk: { response . content . decode () } \" ) return rsp = json . loads ( response . content ) if rsp . get ( \"errcode\" ) != 0 : logger . error ( f \"failed to send message, content: { msg } , response from Dingtalk: { rsp } \" ) return response . content . decode () @classmethod async def _send_async ( cls , msg ): \"\"\"\u53d1\u9001\u6d88\u606f\u5230\u9489\u9489\u673a\u5668\u4eba\"\"\" url = cls . _get_url () async with httpx . AsyncClient () as client : r = await client . post ( url , json = msg , timeout = 30 ) if r . status_code != 200 : logger . error ( f \"failed to send message, content: { msg } , response from Dingtalk: { r . content . decode () } \" ) return rsp = json . loads ( r . content ) if rsp . get ( \"errcode\" ) != 0 : logger . error ( f \"failed to send message, content: { msg } , response from Dingtalk: { rsp } \" ) return r . content . decode () @classmethod @deprecated ( \"2.0.0\" , details = \"use function `ding` instead\" ) def text ( cls , content ): msg = { \"text\" : { \"content\" : content }, \"msgtype\" : \"text\" } return cls . _send ( msg )","title":"DingTalkMessage"},{"location":"api/omicron/#omicron.notify.dingtalk.DingTalkMessage.text","text":".. deprecated:: 2.0.0 use function ding instead Source code in omicron/notify/dingtalk.py @classmethod @deprecated ( \"2.0.0\" , details = \"use function `ding` instead\" ) def text ( cls , content ): msg = { \"text\" : { \"content\" : content }, \"msgtype\" : \"text\" } return cls . 
_send ( msg )","title":"text()"},{"location":"api/omicron/#omicron.notify.dingtalk.ding","text":"\u53d1\u9001\u6d88\u606f\u5230\u9489\u9489\u673a\u5668\u4eba \u652f\u6301\u53d1\u9001\u7eaf\u6587\u672c\u6d88\u606f\u548cmarkdown\u683c\u5f0f\u7684\u6587\u672c\u6d88\u606f\u3002\u5982\u679c\u8981\u53d1\u9001markdown\u683c\u5f0f\u7684\u6d88\u606f\uff0c\u8bf7\u901a\u8fc7\u5b57\u5178\u4f20\u5165\uff0c\u5fc5\u987b\u5305\u542b\u5305\u542b\"title\"\u548c\"text\"\u4e24\u4e2a\u5b57\u6bb5\u3002\u66f4\u8be6\u7ec6\u4fe1\u606f\uff0c\u8bf7\u89c1 \u9489\u9489\u5f00\u653e\u5e73\u53f0\u6587\u6863 Important \u5fc5\u987b\u5728\u5f02\u6b65\u7ebf\u7a0b(\u5373\u8fd0\u884casyncio loop\u7684\u7ebf\u7a0b\uff09\u4e2d\u8c03\u7528\u6b64\u65b9\u6cd5\uff0c\u5426\u5219\u4f1a\u629b\u51fa\u5f02\u5e38\u3002 \u6b64\u65b9\u6cd5\u8fd4\u56de\u4e00\u4e2aAwaitable\uff0c\u60a8\u53ef\u4ee5\u7b49\u5f85\u5b83\u5b8c\u6210\uff0c\u4e5f\u53ef\u4ee5\u5ffd\u7565\u8fd4\u56de\u503c\uff0c\u6b64\u65f6\u5b83\u5c06\u4f5c\u4e3a\u4e00\u4e2a\u540e\u53f0\u4efb\u52a1\u6267\u884c\uff0c\u4f46\u5b8c\u6210\u7684\u65f6\u95f4\u4e0d\u786e\u5b9a\u3002 Parameters: Name Type Description Default msg Union[str, dict] \u5f85\u53d1\u9001\u6d88\u606f\u3002 required Returns: Type Description Awaitable \u53d1\u9001\u6d88\u606f\u7684\u540e\u53f0\u4efb\u52a1\u3002\u60a8\u53ef\u4ee5\u4f7f\u7528\u6b64\u8fd4\u56de\u53e5\u67c4\u6765\u53d6\u6d88\u4efb\u52a1\u3002 Source code in omicron/notify/dingtalk.py def ding ( msg : Union [ str , dict ]) -> Awaitable : \"\"\"\u53d1\u9001\u6d88\u606f\u5230\u9489\u9489\u673a\u5668\u4eba \u652f\u6301\u53d1\u9001\u7eaf\u6587\u672c\u6d88\u606f\u548cmarkdown\u683c\u5f0f\u7684\u6587\u672c\u6d88\u606f\u3002\u5982\u679c\u8981\u53d1\u9001markdown\u683c\u5f0f\u7684\u6d88\u606f\uff0c\u8bf7\u901a\u8fc7\u5b57\u5178\u4f20\u5165\uff0c\u5fc5\u987b\u5305\u542b\u5305\u542b\"title\"\u548c\"text\"\u4e24\u4e2a\u5b57\u6bb5\u3002\u66f4\u8be6\u7ec6\u4fe1\u606f\uff0c\u8bf7\u89c1[\u9489\u9489\u5f00\u653e\u5e73\u53f0\u6587\u6863](https://open.dingtalk.com/document/orgapp-server/message-type) ???+ Important \u5fc5\u987b\u5728\u5f02\u6b65\u7ebf\u7a0b(\u5373\u8fd0\u884casyncio loop\u7684\u7ebf\u7a0b\uff09\u4e2d\u8c03\u7528\u6b64\u65b9\u6cd5\uff0c\u5426\u5219\u4f1a\u629b\u51fa\u5f02\u5e38\u3002 \u6b64\u65b9\u6cd5\u8fd4\u56de\u4e00\u4e2aAwaitable\uff0c\u60a8\u53ef\u4ee5\u7b49\u5f85\u5b83\u5b8c\u6210\uff0c\u4e5f\u53ef\u4ee5\u5ffd\u7565\u8fd4\u56de\u503c\uff0c\u6b64\u65f6\u5b83\u5c06\u4f5c\u4e3a\u4e00\u4e2a\u540e\u53f0\u4efb\u52a1\u6267\u884c\uff0c\u4f46\u5b8c\u6210\u7684\u65f6\u95f4\u4e0d\u786e\u5b9a\u3002 Args: msg: \u5f85\u53d1\u9001\u6d88\u606f\u3002 Returns: \u53d1\u9001\u6d88\u606f\u7684\u540e\u53f0\u4efb\u52a1\u3002\u60a8\u53ef\u4ee5\u4f7f\u7528\u6b64\u8fd4\u56de\u53e5\u67c4\u6765\u53d6\u6d88\u4efb\u52a1\u3002 \"\"\" if isinstance ( msg , str ): msg_ = { \"text\" : { \"content\" : msg }, \"msgtype\" : \"text\" } elif isinstance ( msg , dict ): msg_ = { \"msgtype\" : \"markdown\" , \"markdown\" : { \"title\" : msg [ \"title\" ], \"text\" : msg [ \"text\" ]}, } else : raise TypeError task = asyncio . create_task ( DingTalkMessage . 
_send_async ( msg_ )) return task","title":"ding()"},{"location":"api/omicron/#omicron.notify.mail","text":"","title":"mail"},{"location":"api/omicron/#omicron.notify.mail.compose","text":"\u7f16\u5199MIME\u90ae\u4ef6\u3002 Parameters: Name Type Description Default subject str \u90ae\u4ef6\u4e3b\u9898 required plain_txt str \u7eaf\u6587\u672c\u683c\u5f0f\u7684\u90ae\u4ef6\u5185\u5bb9 None html str html\u683c\u5f0f\u7684\u90ae\u4ef6\u5185\u5bb9. Defaults to None. None attachment str \u9644\u4ef6\u6587\u4ef6\u540d None Returns: Type Description EmailMessage MIME mail Source code in omicron/notify/mail.py def compose ( subject : str , plain_txt : str = None , html : str = None , attachment : str = None ) -> EmailMessage : \"\"\"\u7f16\u5199MIME\u90ae\u4ef6\u3002 Args: subject (str): \u90ae\u4ef6\u4e3b\u9898 plain_txt (str): \u7eaf\u6587\u672c\u683c\u5f0f\u7684\u90ae\u4ef6\u5185\u5bb9 html (str, optional): html\u683c\u5f0f\u7684\u90ae\u4ef6\u5185\u5bb9. Defaults to None. attachment (str, optional): \u9644\u4ef6\u6587\u4ef6\u540d Returns: MIME mail \"\"\" msg = EmailMessage () msg [ \"Subject\" ] = subject if html : msg . preamble = plain_txt or \"\" msg . set_content ( html , subtype = \"html\" ) else : assert plain_txt , \"Either plain_txt or html is required.\" msg . set_content ( plain_txt ) if attachment : ctype , encoding = mimetypes . guess_type ( attachment ) if ctype is None or encoding is not None : ctype = \"application/octet-stream\" maintype , subtype = ctype . split ( \"/\" , 1 ) with open ( attachment , \"rb\" ) as f : msg . add_attachment ( f . read (), maintype = maintype , subtype = subtype , filename = attachment ) return msg","title":"compose()"},{"location":"api/omicron/#omicron.notify.mail.mail_notify","text":"\u53d1\u9001\u90ae\u4ef6\u901a\u77e5\u3002 \u53d1\u9001\u8005\u3001\u63a5\u6536\u8005\u53ca\u90ae\u4ef6\u670d\u52a1\u5668\u7b49\u914d\u7f6e\u8bf7\u901a\u8fc7cfg4py\u914d\u7f6e\uff1a 1 2 3 4 5 notify: mail_from: aaron_yang@jieyu.ai mail_to: - code@jieyu.ai mail_server: smtp.ym.163.com \u9a8c\u8bc1\u5bc6\u7801\u8bf7\u901a\u8fc7\u73af\u5883\u53d8\u91cf MAIL_PASSWORD \u6765\u914d\u7f6e\u3002 subject/body\u4e0emsg\u5fc5\u987b\u63d0\u4f9b\u5176\u4e00\u3002 Important \u5fc5\u987b\u5728\u5f02\u6b65\u7ebf\u7a0b(\u5373\u8fd0\u884casyncio loop\u7684\u7ebf\u7a0b\uff09\u4e2d\u8c03\u7528\u6b64\u65b9\u6cd5\uff0c\u5426\u5219\u4f1a\u629b\u51fa\u5f02\u5e38\u3002 \u6b64\u65b9\u6cd5\u8fd4\u56de\u4e00\u4e2aAwaitable\uff0c\u60a8\u53ef\u4ee5\u7b49\u5f85\u5b83\u5b8c\u6210\uff0c\u4e5f\u53ef\u4ee5\u5ffd\u7565\u8fd4\u56de\u503c\uff0c\u6b64\u65f6\u5b83\u5c06\u4f5c\u4e3a\u4e00\u4e2a\u540e\u53f0\u4efb\u52a1\u6267\u884c\uff0c\u4f46\u5b8c\u6210\u7684\u65f6\u95f4\u4e0d\u786e\u5b9a\u3002 Parameters: Name Type Description Default msg EmailMessage [description]. Defaults to None. None subject str [description]. Defaults to None. None body str [description]. Defaults to None. None html bool body\u662f\u5426\u6309html\u683c\u5f0f\u5904\u7406\uff1f Defaults to False. 
False receivers List[str], Optional \u63a5\u6536\u8005\u4fe1\u606f\u3002\u5982\u679c\u4e0d\u63d0\u4f9b\uff0c\u5c06\u4f7f\u7528\u9884\u5148\u914d\u7f6e\u7684\u63a5\u6536\u8005\u4fe1\u606f\u3002 None Returns: Type Description Awaitable \u53d1\u9001\u6d88\u606f\u7684\u540e\u53f0\u4efb\u52a1\u3002\u60a8\u53ef\u4ee5\u4f7f\u7528\u6b64\u8fd4\u56de\u53e5\u67c4\u6765\u53d6\u6d88\u4efb\u52a1\u3002 Source code in omicron/notify/mail.py def mail_notify ( subject : str = None , body : str = None , msg : EmailMessage = None , html = False , receivers = None , ) -> Awaitable : \"\"\"\u53d1\u9001\u90ae\u4ef6\u901a\u77e5\u3002 \u53d1\u9001\u8005\u3001\u63a5\u6536\u8005\u53ca\u90ae\u4ef6\u670d\u52a1\u5668\u7b49\u914d\u7f6e\u8bf7\u901a\u8fc7cfg4py\u914d\u7f6e\uff1a ``` notify: mail_from: aaron_yang@jieyu.ai mail_to: - code@jieyu.ai mail_server: smtp.ym.163.com ``` \u9a8c\u8bc1\u5bc6\u7801\u8bf7\u901a\u8fc7\u73af\u5883\u53d8\u91cf`MAIL_PASSWORD`\u6765\u914d\u7f6e\u3002 subject/body\u4e0emsg\u5fc5\u987b\u63d0\u4f9b\u5176\u4e00\u3002 ???+ Important \u5fc5\u987b\u5728\u5f02\u6b65\u7ebf\u7a0b(\u5373\u8fd0\u884casyncio loop\u7684\u7ebf\u7a0b\uff09\u4e2d\u8c03\u7528\u6b64\u65b9\u6cd5\uff0c\u5426\u5219\u4f1a\u629b\u51fa\u5f02\u5e38\u3002 \u6b64\u65b9\u6cd5\u8fd4\u56de\u4e00\u4e2aAwaitable\uff0c\u60a8\u53ef\u4ee5\u7b49\u5f85\u5b83\u5b8c\u6210\uff0c\u4e5f\u53ef\u4ee5\u5ffd\u7565\u8fd4\u56de\u503c\uff0c\u6b64\u65f6\u5b83\u5c06\u4f5c\u4e3a\u4e00\u4e2a\u540e\u53f0\u4efb\u52a1\u6267\u884c\uff0c\u4f46\u5b8c\u6210\u7684\u65f6\u95f4\u4e0d\u786e\u5b9a\u3002 Args: msg (EmailMessage, optional): [description]. Defaults to None. subject (str, optional): [description]. Defaults to None. body (str, optional): [description]. Defaults to None. html (bool, optional): body\u662f\u5426\u6309html\u683c\u5f0f\u5904\u7406\uff1f Defaults to False. receivers (List[str], Optional): \u63a5\u6536\u8005\u4fe1\u606f\u3002\u5982\u679c\u4e0d\u63d0\u4f9b\uff0c\u5c06\u4f7f\u7528\u9884\u5148\u914d\u7f6e\u7684\u63a5\u6536\u8005\u4fe1\u606f\u3002 Returns: \u53d1\u9001\u6d88\u606f\u7684\u540e\u53f0\u4efb\u52a1\u3002\u60a8\u53ef\u4ee5\u4f7f\u7528\u6b64\u8fd4\u56de\u53e5\u67c4\u6765\u53d6\u6d88\u4efb\u52a1\u3002 \"\"\" if all ([ msg is not None , subject or body ]): raise TypeError ( \"msg\u53c2\u6570\u4e0esubject/body\u53ea\u80fd\u63d0\u4f9b\u5176\u4e2d\u4e4b\u4e00\" ) elif all ([ msg is None , subject is None , body is None ]): raise TypeError ( \"\u5fc5\u987b\u63d0\u4f9bmsg\u53c2\u6570\u6216\u8005subjecdt/body\u53c2\u6570\" ) if msg is None : if html : msg = compose ( subject , html = body ) else : msg = compose ( subject , plain_txt = body ) cfg = cfg4py . get_instance () if not receivers : receivers = cfg . notify . mail_to password = os . environ . get ( \"MAIL_PASSWORD\" ) return send_mail ( cfg . notify . mail_from , receivers , password , msg , host = cfg . notify . 
mail_server )","title":"mail_notify()"},{"location":"api/omicron/#omicron.notify.mail.send_mail","text":"\u53d1\u9001\u90ae\u4ef6\u901a\u77e5\u3002 \u5982\u679c\u53ea\u53d1\u9001\u7b80\u5355\u7684\u6587\u672c\u90ae\u4ef6\uff0c\u8bf7\u4f7f\u7528 send_mail(sender, receivers, subject=subject, plain=plain)\u3002\u5982\u679c\u8981\u53d1\u9001\u8f83\u590d\u6742\u7684\u5e26html\u548c\u9644\u4ef6\u7684\u90ae\u4ef6\uff0c\u8bf7\u5148\u8c03\u7528compose()\u751f\u6210\u4e00\u4e2aEmailMessage,\u7136\u540e\u518d\u8c03\u7528send_mail(sender, receivers, msg)\u6765\u53d1\u9001\u90ae\u4ef6\u3002 Important \u5fc5\u987b\u5728\u5f02\u6b65\u7ebf\u7a0b(\u5373\u8fd0\u884casyncio loop\u7684\u7ebf\u7a0b\uff09\u4e2d\u8c03\u7528\u6b64\u65b9\u6cd5\uff0c\u5426\u5219\u4f1a\u629b\u51fa\u5f02\u5e38\u3002 \u6b64\u65b9\u6cd5\u8fd4\u56de\u4e00\u4e2aAwaitable\uff0c\u60a8\u53ef\u4ee5\u7b49\u5f85\u5b83\u5b8c\u6210\uff0c\u4e5f\u53ef\u4ee5\u5ffd\u7565\u8fd4\u56de\u503c\uff0c\u6b64\u65f6\u5b83\u5c06\u4f5c\u4e3a\u4e00\u4e2a\u540e\u53f0\u4efb\u52a1\u6267\u884c\uff0c\u4f46\u5b8c\u6210\u7684\u65f6\u95f4\u4e0d\u786e\u5b9a\u3002 Parameters: Name Type Description Default sender str [description] required receivers List[str] [description] required msg EmailMessage [description]. Defaults to None. None host str [description]. Defaults to None. None port int [description]. Defaults to 25. 25 cc List[str] [description]. Defaults to None. None bcc List[str] [description]. Defaults to None. None subject str [description]. Defaults to None. None plain str [description]. Defaults to None. required username str the username used to logon to mail server. if not provided, then sender is used. None Returns: Type Description Awaitable \u53d1\u9001\u6d88\u606f\u7684\u540e\u53f0\u4efb\u52a1\u3002\u60a8\u53ef\u4ee5\u4f7f\u7528\u6b64\u8fd4\u56de\u53e5\u67c4\u6765\u53d6\u6d88\u4efb\u52a1\u3002 Source code in omicron/notify/mail.py @retry ( aiosmtplib . errors . SMTPConnectError , tries = 3 , backoff = 2 , delay = 30 , logger = logger ) def send_mail ( sender : str , receivers : List [ str ], password : str , msg : EmailMessage = None , host : str = None , port : int = 25 , cc : List [ str ] = None , bcc : List [ str ] = None , subject : str = None , body : str = None , username : str = None , ) -> Awaitable : \"\"\"\u53d1\u9001\u90ae\u4ef6\u901a\u77e5\u3002 \u5982\u679c\u53ea\u53d1\u9001\u7b80\u5355\u7684\u6587\u672c\u90ae\u4ef6\uff0c\u8bf7\u4f7f\u7528 send_mail(sender, receivers, subject=subject, plain=plain)\u3002\u5982\u679c\u8981\u53d1\u9001\u8f83\u590d\u6742\u7684\u5e26html\u548c\u9644\u4ef6\u7684\u90ae\u4ef6\uff0c\u8bf7\u5148\u8c03\u7528compose()\u751f\u6210\u4e00\u4e2aEmailMessage,\u7136\u540e\u518d\u8c03\u7528send_mail(sender, receivers, msg)\u6765\u53d1\u9001\u90ae\u4ef6\u3002 ???+ Important \u5fc5\u987b\u5728\u5f02\u6b65\u7ebf\u7a0b(\u5373\u8fd0\u884casyncio loop\u7684\u7ebf\u7a0b\uff09\u4e2d\u8c03\u7528\u6b64\u65b9\u6cd5\uff0c\u5426\u5219\u4f1a\u629b\u51fa\u5f02\u5e38\u3002 \u6b64\u65b9\u6cd5\u8fd4\u56de\u4e00\u4e2aAwaitable\uff0c\u60a8\u53ef\u4ee5\u7b49\u5f85\u5b83\u5b8c\u6210\uff0c\u4e5f\u53ef\u4ee5\u5ffd\u7565\u8fd4\u56de\u503c\uff0c\u6b64\u65f6\u5b83\u5c06\u4f5c\u4e3a\u4e00\u4e2a\u540e\u53f0\u4efb\u52a1\u6267\u884c\uff0c\u4f46\u5b8c\u6210\u7684\u65f6\u95f4\u4e0d\u786e\u5b9a\u3002 Args: sender (str): [description] receivers (List[str]): [description] msg (EmailMessage, optional): [description]. Defaults to None. host (str, optional): [description]. Defaults to None. port (int, optional): [description]. Defaults to 25. 
cc (List[str], optional): [description]. Defaults to None. bcc (List[str], optional): [description]. Defaults to None. subject (str, optional): [description]. Defaults to None. plain (str, optional): [description]. Defaults to None. username (str, optional): the username used to logon to mail server. if not provided, then `sender` is used. Returns: \u53d1\u9001\u6d88\u606f\u7684\u540e\u53f0\u4efb\u52a1\u3002\u60a8\u53ef\u4ee5\u4f7f\u7528\u6b64\u8fd4\u56de\u53e5\u67c4\u6765\u53d6\u6d88\u4efb\u52a1\u3002 \"\"\" if all ([ msg is not None , subject is not None or body is not None ]): raise TypeError ( \"msg\u53c2\u6570\u4e0esubject/body\u53ea\u80fd\u63d0\u4f9b\u5176\u4e2d\u4e4b\u4e00\" ) elif all ([ msg is None , subject is None , body is None ]): raise TypeError ( \"\u5fc5\u987b\u63d0\u4f9bmsg\u53c2\u6570\u6216\u8005subjecdt/body\u53c2\u6570\" ) msg = msg or EmailMessage () if isinstance ( receivers , str ): receivers = [ receivers ] msg [ \"From\" ] = sender msg [ \"To\" ] = \", \" . join ( receivers ) if subject : msg [ \"subject\" ] = subject if body : msg . set_content ( body ) if cc : msg [ \"Cc\" ] = \", \" . join ( cc ) if bcc : msg [ \"Bcc\" ] = \", \" . join ( bcc ) username = username or sender if host is None : host = sender . split ( \"@\" )[ - 1 ] task = asyncio . create_task ( aiosmtplib . send ( msg , hostname = host , port = port , username = sender , password = password ) ) return task","title":"send_mail()"},{"location":"api/omicron/#backtesting-log-facility","text":"Info Since 2.0.0.a76 \u56de\u6d4b\u65f6\uff0c\u6253\u5370\u65f6\u95f4\u4e00\u822c\u8981\u6c42\u4e3a\u56de\u6d4b\u5f53\u65f6\u7684\u65f6\u95f4\uff0c\u800c\u975e\u7cfb\u7edf\u65f6\u95f4\u3002\u8fd9\u4e2a\u6a21\u5757\u63d0\u4f9b\u4e86\u6539\u5199\u65e5\u5fd7\u65f6\u95f4\u7684\u529f\u80fd\u3002 \u4f7f\u7528\u65b9\u6cd5\uff1a 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 from omicron.core.backtestlog import BacktestLogger logger = BacktestLogger . getLogger ( __name__ ) logger . setLevel ( logging . INFO ) handler = logging . StreamHandler () # \u901a\u8fc7bt_date\u57df\u6765\u8bbe\u7f6e\u65e5\u671f\uff0c\u800c\u4e0d\u662fasctime handler . setFormatter ( Formatter ( \" %(bt_date)s %(message)s \" )) logging . basicConfig ( level = logging . INFO , handlers = [ handler ]) # \u8c03\u7528\u65f6\u4e0e\u666e\u901a\u65e5\u5fd7\u4e00\u6837\uff0c\u4f46\u8981\u589e\u52a0\u4e00\u4e2adate\u53c2\u6570 logger . info ( \"this is info\" , date = datetime . date ( 2022 , 3 , 1 )) \u4e0a\u8ff0\u4ee3\u7801\u5c06\u8f93\u51fa\uff1a 1 2022-03-01 this is info \u4f7f\u7528\u672c\u65e5\u5fd7\u7684\u6838\u5fc3\u662f\u4e0a\u8ff0\u4ee3\u7801\u4e2d\u7684\u7b2c3\u884c\u548c\u7b2c9\u884c\uff0c\u6700\u540e\uff0c\u5728\u8f93\u51fa\u65e5\u5fd7\u65f6\u52a0\u4e0a date=... 
\uff0c\u5982\u7b2c15\u884c\u6240\u793a\u3002 \u6ce8\u610f\u5728\u7b2c9\u884c\uff0c\u901a\u5e38\u662f logging.getLogger(__nam__) \uff0c\u800c\u8fd9\u91cc\u662f BacktestLogger.getLogger(__name__) \u5982\u679c\u4e0a\u8ff0\u8c03\u7528\u4e2d\u6ca1\u6709\u4f20\u5165 date \uff0c\u5219\u5c06\u4f7f\u7528\u8c03\u7528\u65f6\u95f4\uff0c\u6b64\u65f6\u884c\u4e3a\u8ddf\u539f\u65e5\u5fd7\u7cfb\u7edf\u4e00\u81f4\u3002 Warning \u5f53\u8c03\u7528logger.exception\u65f6\uff0c\u4e0d\u80fd\u4f20\u5165date\u53c2\u6570\u3002","title":"Backtesting Log Facility"},{"location":"api/omicron/#omicron.core.backtestlog--\u914d\u7f6e\u6587\u4ef6\u793a\u4f8b","text":"\u5982\u679c\u8981\u901a\u8fc7\u914d\u7f6e\u6587\u4ef6\u6765\u914d\u7f6e\uff0c\u53ef\u4f7f\u7528\u4ee5\u4e0b\u793a\u4f8b\uff1a 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 formatters : backtest : format : '%(bt_date)s | %(message)s' handlers : backtest : class : logging.StreamHandler formatter : backtest omicron.base.strategy : level : INFO handlers : [ backtest ] propagate : false loggers : omicron.base.strategy : level : INFO handlers : [ backtest ] propagate : false","title":"\u914d\u7f6e\u6587\u4ef6\u793a\u4f8b"},{"location":"api/security/","text":"Query \u00b6 \u8bc1\u5238\u4fe1\u606f\u67e5\u8be2\u5bf9\u8c61 \u8bc1\u5238\u4fe1\u606f\u67e5\u8be2\u5bf9\u8c61\uff0c\u7531 Security.select() \u65b9\u6cd5\u751f\u6210\uff0c\u652f\u6301\u94fe\u5f0f\u67e5\u8be2\u3002\u901a\u8fc7 eval \u51fd\u6570\u7ed3\u675f\u94fe\u5f0f\u8c03\u7528\u5e76\u751f\u6210\u67e5\u8be2\u7ed3\u679c\u3002 Source code in omicron/models/security.py class Query : \"\"\"\u8bc1\u5238\u4fe1\u606f\u67e5\u8be2\u5bf9\u8c61 \u8bc1\u5238\u4fe1\u606f\u67e5\u8be2\u5bf9\u8c61\uff0c\u7531`Security.select()`\u65b9\u6cd5\u751f\u6210\uff0c\u652f\u6301\u94fe\u5f0f\u67e5\u8be2\u3002\u901a\u8fc7`eval`\u51fd\u6570\u7ed3\u675f\u94fe\u5f0f\u8c03\u7528\u5e76\u751f\u6210\u67e5\u8be2\u7ed3\u679c\u3002 \"\"\" def __init__ ( self , target_date : datetime . date = None ): if target_date is None : # \u805a\u5bbd\u4e0d\u4e00\u5b9a\u4f1a\u53ca\u65f6\u66f4\u65b0\u6570\u636e\uff0c\u56e0\u6b64db\u4e2d\u4e0d\u5b58\u653e\u5f53\u5929\u7684\u6570\u636e\uff0c\u5982\u679c\u4f20\u7a7a\uff0c\u67e5cache self . target_date = None else : # \u5982\u679c\u662f\u4ea4\u6613\u65e5\uff0c\u53d6\u5f53\u5929\uff0c\u5426\u5219\u53d6\u524d\u4e00\u5929 self . target_date = tf . day_shift ( target_date , 0 ) # \u540d\u5b57\uff0c\u663e\u793a\u540d\uff0c\u7c7b\u578b\u8fc7\u6ee4\u5668 self . _name_pattern = None # \u5b57\u6bcd\u540d\u5b57 self . _alias_pattern = None # \u663e\u793a\u540d self . _type_pattern = None # \u4e0d\u6307\u5b9a\u5219\u9ed8\u8ba4\u4e3a\u5168\u90e8\uff0c\u5982\u679c\u4f20\u5165\u7a7a\u503c\u5219\u53ea\u9009\u62e9\u80a1\u7968\u548c\u6307\u6570 # \u5f00\u5173\u9009\u9879 self . _exclude_kcb = False # \u79d1\u521b\u677f self . _exclude_cyb = False # \u521b\u4e1a\u677f self . _exclude_st = False # ST self . _include_exit = False # \u662f\u5426\u5305\u542b\u5df2\u9000\u5e02\u8bc1\u5238(\u9ed8\u8ba4\u4e0d\u5305\u62ec\u5f53\u5929\u9000\u5e02\u7684) # \u4e0b\u5217\u5f00\u5173\u4f18\u5148\u7ea7\u9ad8\u4e8e\u4e0a\u9762\u7684 self . _only_kcb = False self . _only_cyb = False self . _only_st = False def only_cyb ( self ) -> \"Query\" : \"\"\"\u8fd4\u56de\u7ed3\u679c\u4e2d\u53ea\u5305\u542b\u521b\u4e1a\u677f\u80a1\u7968\"\"\" self . _only_cyb = True # \u9ad8\u4f18\u5148\u7ea7 self . _exclude_cyb = False self . _only_kcb = False self . 
_only_st = False return self def only_st ( self ) -> \"Query\" : \"\"\"\u8fd4\u56de\u7ed3\u679c\u4e2d\u53ea\u5305\u542bST\u7c7b\u578b\u7684\u8bc1\u5238\"\"\" self . _only_st = True # \u9ad8\u4f18\u5148\u7ea7 self . _exclude_st = False self . _only_kcb = False self . _only_cyb = False return self def only_kcb ( self ) -> \"Query\" : \"\"\"\u8fd4\u56de\u7ed3\u679c\u4e2d\u53ea\u5305\u542b\u79d1\u521b\u677f\u80a1\u7968\"\"\" self . _only_kcb = True # \u9ad8\u4f18\u5148\u7ea7 self . _exclude_kcb = False self . _only_cyb = False self . _only_st = False return self def exclude_st ( self ) -> \"Query\" : \"\"\"\u4ece\u8fd4\u56de\u7ed3\u679c\u4e2d\u6392\u9664ST\u7c7b\u578b\u7684\u80a1\u7968\"\"\" self . _exclude_st = True self . _only_st = False return self def exclude_cyb ( self ) -> \"Query\" : \"\"\"\u4ece\u8fd4\u56de\u7ed3\u679c\u4e2d\u6392\u9664\u521b\u4e1a\u677f\u7c7b\u578b\u7684\u80a1\u7968\"\"\" self . _exclude_cyb = True self . _only_cyb = False return self def exclude_kcb ( self ) -> \"Query\" : \"\"\"\u4ece\u8fd4\u56de\u7ed3\u679c\u4e2d\u6392\u9664\u79d1\u521b\u677f\u7c7b\u578b\u7684\u80a1\u7968\"\"\" self . _exclude_kcb = True self . _only_kcb = False return self def include_exit ( self ) -> \"Query\" : \"\"\"\u4ece\u8fd4\u56de\u7ed3\u679c\u4e2d\u5305\u542b\u5df2\u9000\u5e02\u7684\u8bc1\u5238\"\"\" self . _include_exit = True return self def types ( self , types : List [ str ]) -> \"Query\" : \"\"\"\u9009\u62e9\u7c7b\u578b\u5728`types`\u4e2d\u7684\u8bc1\u5238\u54c1\u79cd Args: types: \u6709\u6548\u7684\u7c7b\u578b\u5305\u62ec: \u5bf9\u80a1\u7968\u6307\u6570\u800c\u8a00\u662f\uff08'index', 'stock'\uff09\uff0c\u5bf9\u57fa\u91d1\u800c\u8a00\u5219\u662f\uff08'etf', 'fjb', 'mmf', 'reits', 'fja', 'fjm', 'lof'\uff09 \"\"\" if types is None or isinstance ( types , List ) is False : return self if len ( types ) == 0 : self . _type_pattern = [ \"index\" , \"stock\" ] else : tmp = set ( types ) self . _type_pattern = list ( tmp ) return self def name_like ( self , name : str ) -> \"Query\" : \"\"\"\u67e5\u627e\u80a1\u7968/\u8bc1\u5238\u540d\u79f0\u4e2d\u51fa\u73b0`name`\u7684\u54c1\u79cd \u6ce8\u610f\u8fd9\u91cc\u7684\u8bc1\u5238\u540d\u79f0\u5e76\u4e0d\u662f\u5176\u663e\u793a\u540d\u3002\u6bd4\u5982\u5bf9\u4e2d\u56fd\u5e73\u5b89000001.XSHE\u6765\u8bf4\uff0c\u5b83\u7684\u540d\u79f0\u662fZGPA\uff0c\u800c\u4e0d\u662f\u201c\u4e2d\u56fd\u5e73\u5b89\u201d\u3002 Args: name: \u5f85\u67e5\u627e\u7684\u540d\u5b57\uff0c\u6bd4\u5982\"ZGPA\" \"\"\" if name is None or len ( name ) == 0 : self . _name_pattern = None else : self . _name_pattern = name return self def alias_like ( self , display_name : str ) -> \"Query\" : \"\"\"\u67e5\u627e\u80a1\u7968/\u8bc1\u5238\u663e\u793a\u540d\u4e2d\u51fa\u73b0`display_name\u7684\u54c1\u79cd Args: display_name: \u663e\u793a\u540d\uff0c\u6bd4\u5982\u201c\u4e2d\u56fd\u5e73\u5b89\" \"\"\" if display_name is None or len ( display_name ) == 0 : self . _alias_pattern = None else : self . _alias_pattern = display_name return self async def eval ( self ) -> List [ str ]: \"\"\"\u5bf9\u67e5\u8be2\u7ed3\u679c\u8fdb\u884c\u6c42\u503c\uff0c\u8fd4\u56decode\u5217\u8868 Returns: \u4ee3\u7801\u5217\u8868 \"\"\" logger . debug ( \"eval, date: %s \" , self . target_date ) logger . debug ( \"eval, names and types: %s , %s , %s \" , self . _name_pattern , self . _alias_pattern , self . _type_pattern , ) logger . debug ( \"eval, exclude and include: %s , %s , %s , %s \" , self . _exclude_cyb , self . _exclude_st , self . _exclude_kcb , self . _include_exit , ) logger . 
debug ( \"eval, only: %s , %s , %s \" , self . _only_cyb , self . _only_st , self . _only_kcb ) date_in_cache = await cache . security . get ( \"security:latest_date\" ) if date_in_cache : # \u65e0\u6b64\u6570\u636e\u8bf4\u660eomega\u6709\u67d0\u4e9b\u95ee\u9898\uff0c\u4e0d\u5904\u7406 _date = arrow . get ( date_in_cache ) . date () else : now = datetime . datetime . now () _date = tf . day_shift ( now , 0 ) # \u786e\u5b9a\u6570\u636e\u6e90\uff0ccache\u4e3a\u5f53\u59298\u70b9\u4e4b\u540e\u83b7\u53d6\u7684\u6570\u636e\uff0c\u6570\u636e\u5e93\u5b58\u653e\u524d\u4e00\u65e5\u548c\u66f4\u65e9\u7684\u6570\u636e if not self . target_date or self . target_date >= _date : self . target_date = _date records = None if self . target_date == _date : # \u4ece\u5185\u5b58\u4e2d\u67e5\u627e\uff0c\u5982\u679c\u7f13\u5b58\u4e2d\u7684\u6570\u636e\u5df2\u66f4\u65b0\uff0c\u91cd\u65b0\u52a0\u8f7d\u5230\u5185\u5b58 secs = await cache . security . lrange ( \"security:all\" , 0 , - 1 ) if len ( secs ) != 0 : # using np.datetime64[s] records = np . array ( [ tuple ( x . split ( \",\" )) for x in secs ], dtype = security_info_dtype ) else : records = await Security . load_securities_from_db ( self . target_date ) if records is None : return None results = [] for record in records : if self . _type_pattern is not None : if record [ \"type\" ] not in self . _type_pattern : continue if self . _name_pattern is not None : if record [ \"name\" ] . find ( self . _name_pattern ) == - 1 : continue if self . _alias_pattern is not None : if record [ \"alias\" ] . find ( self . _alias_pattern ) == - 1 : continue # \u521b\u4e1a\u677f\uff0c\u79d1\u521b\u677f\uff0cST\u6682\u65f6\u9650\u5b9a\u4e3a\u80a1\u7968\u7c7b\u578b if self . _only_cyb : if record [ \"type\" ] != \"stock\" or not ( record [ \"code\" ][: 3 ] in ( \"300\" , \"301\" ) ): continue if self . _only_kcb : if ( record [ \"type\" ] != \"stock\" or record [ \"code\" ] . startswith ( \"688\" ) is False ): continue if self . _only_st : if record [ \"type\" ] != \"stock\" or record [ \"alias\" ] . find ( \"ST\" ) == - 1 : continue if self . _exclude_cyb : if record [ \"type\" ] == \"stock\" and record [ \"code\" ][: 3 ] in ( \"300\" , \"301\" ): continue if self . _exclude_st : if record [ \"type\" ] == \"stock\" and record [ \"alias\" ] . find ( \"ST\" ) != - 1 : continue if self . _exclude_kcb : if record [ \"type\" ] == \"stock\" and record [ \"code\" ] . startswith ( \"688\" ): continue # \u9000\u5e02\u6682\u4e0d\u9650\u5b9a\u662f\u5426\u4e3a\u80a1\u7968 if self . _include_exit is False : d1 = convert_nptime_to_datetime ( record [ \"end\" ]) . date () if d1 < self . target_date : continue results . append ( record [ \"code\" ]) # \u8fd4\u56de\u6240\u6709\u67e5\u8be2\u5230\u7684\u7ed3\u679c return results alias_like ( self , display_name ) \u00b6 \u67e5\u627e\u80a1\u7968/\u8bc1\u5238\u663e\u793a\u540d\u4e2d\u51fa\u73b0`display_name\u7684\u54c1\u79cd Parameters: Name Type Description Default display_name str \u663e\u793a\u540d\uff0c\u6bd4\u5982\u201c\u4e2d\u56fd\u5e73\u5b89\" required Source code in omicron/models/security.py def alias_like ( self , display_name : str ) -> \"Query\" : \"\"\"\u67e5\u627e\u80a1\u7968/\u8bc1\u5238\u663e\u793a\u540d\u4e2d\u51fa\u73b0`display_name\u7684\u54c1\u79cd Args: display_name: \u663e\u793a\u540d\uff0c\u6bd4\u5982\u201c\u4e2d\u56fd\u5e73\u5b89\" \"\"\" if display_name is None or len ( display_name ) == 0 : self . _alias_pattern = None else : self . 
_alias_pattern = display_name return self eval ( self ) async \u00b6 \u5bf9\u67e5\u8be2\u7ed3\u679c\u8fdb\u884c\u6c42\u503c\uff0c\u8fd4\u56decode\u5217\u8868 Returns: Type Description List[str] \u4ee3\u7801\u5217\u8868 Source code in omicron/models/security.py async def eval ( self ) -> List [ str ]: \"\"\"\u5bf9\u67e5\u8be2\u7ed3\u679c\u8fdb\u884c\u6c42\u503c\uff0c\u8fd4\u56decode\u5217\u8868 Returns: \u4ee3\u7801\u5217\u8868 \"\"\" logger . debug ( \"eval, date: %s \" , self . target_date ) logger . debug ( \"eval, names and types: %s , %s , %s \" , self . _name_pattern , self . _alias_pattern , self . _type_pattern , ) logger . debug ( \"eval, exclude and include: %s , %s , %s , %s \" , self . _exclude_cyb , self . _exclude_st , self . _exclude_kcb , self . _include_exit , ) logger . debug ( \"eval, only: %s , %s , %s \" , self . _only_cyb , self . _only_st , self . _only_kcb ) date_in_cache = await cache . security . get ( \"security:latest_date\" ) if date_in_cache : # \u65e0\u6b64\u6570\u636e\u8bf4\u660eomega\u6709\u67d0\u4e9b\u95ee\u9898\uff0c\u4e0d\u5904\u7406 _date = arrow . get ( date_in_cache ) . date () else : now = datetime . datetime . now () _date = tf . day_shift ( now , 0 ) # \u786e\u5b9a\u6570\u636e\u6e90\uff0ccache\u4e3a\u5f53\u59298\u70b9\u4e4b\u540e\u83b7\u53d6\u7684\u6570\u636e\uff0c\u6570\u636e\u5e93\u5b58\u653e\u524d\u4e00\u65e5\u548c\u66f4\u65e9\u7684\u6570\u636e if not self . target_date or self . target_date >= _date : self . target_date = _date records = None if self . target_date == _date : # \u4ece\u5185\u5b58\u4e2d\u67e5\u627e\uff0c\u5982\u679c\u7f13\u5b58\u4e2d\u7684\u6570\u636e\u5df2\u66f4\u65b0\uff0c\u91cd\u65b0\u52a0\u8f7d\u5230\u5185\u5b58 secs = await cache . security . lrange ( \"security:all\" , 0 , - 1 ) if len ( secs ) != 0 : # using np.datetime64[s] records = np . array ( [ tuple ( x . split ( \",\" )) for x in secs ], dtype = security_info_dtype ) else : records = await Security . load_securities_from_db ( self . target_date ) if records is None : return None results = [] for record in records : if self . _type_pattern is not None : if record [ \"type\" ] not in self . _type_pattern : continue if self . _name_pattern is not None : if record [ \"name\" ] . find ( self . _name_pattern ) == - 1 : continue if self . _alias_pattern is not None : if record [ \"alias\" ] . find ( self . _alias_pattern ) == - 1 : continue # \u521b\u4e1a\u677f\uff0c\u79d1\u521b\u677f\uff0cST\u6682\u65f6\u9650\u5b9a\u4e3a\u80a1\u7968\u7c7b\u578b if self . _only_cyb : if record [ \"type\" ] != \"stock\" or not ( record [ \"code\" ][: 3 ] in ( \"300\" , \"301\" ) ): continue if self . _only_kcb : if ( record [ \"type\" ] != \"stock\" or record [ \"code\" ] . startswith ( \"688\" ) is False ): continue if self . _only_st : if record [ \"type\" ] != \"stock\" or record [ \"alias\" ] . find ( \"ST\" ) == - 1 : continue if self . _exclude_cyb : if record [ \"type\" ] == \"stock\" and record [ \"code\" ][: 3 ] in ( \"300\" , \"301\" ): continue if self . _exclude_st : if record [ \"type\" ] == \"stock\" and record [ \"alias\" ] . find ( \"ST\" ) != - 1 : continue if self . _exclude_kcb : if record [ \"type\" ] == \"stock\" and record [ \"code\" ] . startswith ( \"688\" ): continue # \u9000\u5e02\u6682\u4e0d\u9650\u5b9a\u662f\u5426\u4e3a\u80a1\u7968 if self . _include_exit is False : d1 = convert_nptime_to_datetime ( record [ \"end\" ]) . date () if d1 < self . target_date : continue results . 
append ( record [ \"code\" ]) # \u8fd4\u56de\u6240\u6709\u67e5\u8be2\u5230\u7684\u7ed3\u679c return results exclude_cyb ( self ) \u00b6 \u4ece\u8fd4\u56de\u7ed3\u679c\u4e2d\u6392\u9664\u521b\u4e1a\u677f\u7c7b\u578b\u7684\u80a1\u7968 Source code in omicron/models/security.py def exclude_cyb ( self ) -> \"Query\" : \"\"\"\u4ece\u8fd4\u56de\u7ed3\u679c\u4e2d\u6392\u9664\u521b\u4e1a\u677f\u7c7b\u578b\u7684\u80a1\u7968\"\"\" self . _exclude_cyb = True self . _only_cyb = False return self exclude_kcb ( self ) \u00b6 \u4ece\u8fd4\u56de\u7ed3\u679c\u4e2d\u6392\u9664\u79d1\u521b\u677f\u7c7b\u578b\u7684\u80a1\u7968 Source code in omicron/models/security.py def exclude_kcb ( self ) -> \"Query\" : \"\"\"\u4ece\u8fd4\u56de\u7ed3\u679c\u4e2d\u6392\u9664\u79d1\u521b\u677f\u7c7b\u578b\u7684\u80a1\u7968\"\"\" self . _exclude_kcb = True self . _only_kcb = False return self exclude_st ( self ) \u00b6 \u4ece\u8fd4\u56de\u7ed3\u679c\u4e2d\u6392\u9664ST\u7c7b\u578b\u7684\u80a1\u7968 Source code in omicron/models/security.py def exclude_st ( self ) -> \"Query\" : \"\"\"\u4ece\u8fd4\u56de\u7ed3\u679c\u4e2d\u6392\u9664ST\u7c7b\u578b\u7684\u80a1\u7968\"\"\" self . _exclude_st = True self . _only_st = False return self include_exit ( self ) \u00b6 \u4ece\u8fd4\u56de\u7ed3\u679c\u4e2d\u5305\u542b\u5df2\u9000\u5e02\u7684\u8bc1\u5238 Source code in omicron/models/security.py def include_exit ( self ) -> \"Query\" : \"\"\"\u4ece\u8fd4\u56de\u7ed3\u679c\u4e2d\u5305\u542b\u5df2\u9000\u5e02\u7684\u8bc1\u5238\"\"\" self . _include_exit = True return self name_like ( self , name ) \u00b6 \u67e5\u627e\u80a1\u7968/\u8bc1\u5238\u540d\u79f0\u4e2d\u51fa\u73b0 name \u7684\u54c1\u79cd \u6ce8\u610f\u8fd9\u91cc\u7684\u8bc1\u5238\u540d\u79f0\u5e76\u4e0d\u662f\u5176\u663e\u793a\u540d\u3002\u6bd4\u5982\u5bf9\u4e2d\u56fd\u5e73\u5b89000001.XSHE\u6765\u8bf4\uff0c\u5b83\u7684\u540d\u79f0\u662fZGPA\uff0c\u800c\u4e0d\u662f\u201c\u4e2d\u56fd\u5e73\u5b89\u201d\u3002 Parameters: Name Type Description Default name str \u5f85\u67e5\u627e\u7684\u540d\u5b57\uff0c\u6bd4\u5982\"ZGPA\" required Source code in omicron/models/security.py def name_like ( self , name : str ) -> \"Query\" : \"\"\"\u67e5\u627e\u80a1\u7968/\u8bc1\u5238\u540d\u79f0\u4e2d\u51fa\u73b0`name`\u7684\u54c1\u79cd \u6ce8\u610f\u8fd9\u91cc\u7684\u8bc1\u5238\u540d\u79f0\u5e76\u4e0d\u662f\u5176\u663e\u793a\u540d\u3002\u6bd4\u5982\u5bf9\u4e2d\u56fd\u5e73\u5b89000001.XSHE\u6765\u8bf4\uff0c\u5b83\u7684\u540d\u79f0\u662fZGPA\uff0c\u800c\u4e0d\u662f\u201c\u4e2d\u56fd\u5e73\u5b89\u201d\u3002 Args: name: \u5f85\u67e5\u627e\u7684\u540d\u5b57\uff0c\u6bd4\u5982\"ZGPA\" \"\"\" if name is None or len ( name ) == 0 : self . _name_pattern = None else : self . _name_pattern = name return self only_cyb ( self ) \u00b6 \u8fd4\u56de\u7ed3\u679c\u4e2d\u53ea\u5305\u542b\u521b\u4e1a\u677f\u80a1\u7968 Source code in omicron/models/security.py def only_cyb ( self ) -> \"Query\" : \"\"\"\u8fd4\u56de\u7ed3\u679c\u4e2d\u53ea\u5305\u542b\u521b\u4e1a\u677f\u80a1\u7968\"\"\" self . _only_cyb = True # \u9ad8\u4f18\u5148\u7ea7 self . _exclude_cyb = False self . _only_kcb = False self . _only_st = False return self only_kcb ( self ) \u00b6 \u8fd4\u56de\u7ed3\u679c\u4e2d\u53ea\u5305\u542b\u79d1\u521b\u677f\u80a1\u7968 Source code in omicron/models/security.py def only_kcb ( self ) -> \"Query\" : \"\"\"\u8fd4\u56de\u7ed3\u679c\u4e2d\u53ea\u5305\u542b\u79d1\u521b\u677f\u80a1\u7968\"\"\" self . _only_kcb = True # \u9ad8\u4f18\u5148\u7ea7 self . _exclude_kcb = False self . _only_cyb = False self . 
_only_st = False return self only_st ( self ) \u00b6 \u8fd4\u56de\u7ed3\u679c\u4e2d\u53ea\u5305\u542bST\u7c7b\u578b\u7684\u8bc1\u5238 Source code in omicron/models/security.py def only_st ( self ) -> \"Query\" : \"\"\"\u8fd4\u56de\u7ed3\u679c\u4e2d\u53ea\u5305\u542bST\u7c7b\u578b\u7684\u8bc1\u5238\"\"\" self . _only_st = True # \u9ad8\u4f18\u5148\u7ea7 self . _exclude_st = False self . _only_kcb = False self . _only_cyb = False return self types ( self , types ) \u00b6 \u9009\u62e9\u7c7b\u578b\u5728 types \u4e2d\u7684\u8bc1\u5238\u54c1\u79cd Parameters: Name Type Description Default types List[str] \u6709\u6548\u7684\u7c7b\u578b\u5305\u62ec: \u5bf9\u80a1\u7968\u6307\u6570\u800c\u8a00\u662f\uff08'index', 'stock'\uff09\uff0c\u5bf9\u57fa\u91d1\u800c\u8a00\u5219\u662f\uff08'etf', 'fjb', 'mmf', 'reits', 'fja', 'fjm', 'lof'\uff09 required Source code in omicron/models/security.py def types ( self , types : List [ str ]) -> \"Query\" : \"\"\"\u9009\u62e9\u7c7b\u578b\u5728`types`\u4e2d\u7684\u8bc1\u5238\u54c1\u79cd Args: types: \u6709\u6548\u7684\u7c7b\u578b\u5305\u62ec: \u5bf9\u80a1\u7968\u6307\u6570\u800c\u8a00\u662f\uff08'index', 'stock'\uff09\uff0c\u5bf9\u57fa\u91d1\u800c\u8a00\u5219\u662f\uff08'etf', 'fjb', 'mmf', 'reits', 'fja', 'fjm', 'lof'\uff09 \"\"\" if types is None or isinstance ( types , List ) is False : return self if len ( types ) == 0 : self . _type_pattern = [ \"index\" , \"stock\" ] else : tmp = set ( types ) self . _type_pattern = list ( tmp ) return self Security \u00b6 Source code in omicron/models/security.py class Security : _securities = [] _securities_date = None _security_types = set () _stocks = [] @classmethod async def init ( cls ): \"\"\"\u521d\u59cb\u5316Security. \u4e00\u822c\u800c\u8a00\uff0comicron\u7684\u4f7f\u7528\u8005\u65e0\u987b\u8c03\u7528\u6b64\u65b9\u6cd5\uff0c\u5b83\u4f1a\u5728omicron\u521d\u59cb\u5316\uff08\u901a\u8fc7`omicron.init`\uff09\u65f6\uff0c\u88ab\u81ea\u52a8\u8c03\u7528\u3002 Raises: DataNotReadyError: \u5982\u679comicron\u672a\u521d\u59cb\u5316\uff0c\u6216\u8005cache\u4e2d\u672a\u52a0\u8f7d\u6700\u65b0\u8bc1\u5238\u5217\u8868\uff0c\u5219\u629b\u51fa\u6b64\u5f02\u5e38\u3002 \"\"\" # read all securities from redis, 7111 records now # {'index', 'stock'} # {'fjb', 'mmf', 'reits', 'fja', 'fjm'} # {'etf', 'lof'} if len ( cls . _securities ) > 100 : return True secs = await cls . load_securities () if secs is None or len ( secs ) == 0 : # pragma: no cover raise DataNotReadyError ( \"No securities in cache, make sure you have called omicron.init() first.\" ) print ( \"init securities done\" ) return True @classmethod async def load_securities ( cls ): \"\"\"\u52a0\u8f7d\u6240\u6709\u8bc1\u5238\u7684\u4fe1\u606f\uff0c\u5e76\u7f13\u5b58\u5230\u5185\u5b58\u4e2d \u4e00\u822c\u800c\u8a00\uff0comicron\u7684\u4f7f\u7528\u8005\u65e0\u987b\u8c03\u7528\u6b64\u65b9\u6cd5\uff0c\u5b83\u4f1a\u5728omicron\u521d\u59cb\u5316\uff08\u901a\u8fc7`omicron.init`\uff09\u65f6\uff0c\u88ab\u81ea\u52a8\u8c03\u7528\u3002 \"\"\" secs = await cache . security . lrange ( \"security:all\" , 0 , - 1 ) if len ( secs ) != 0 : # using np.datetime64[s] _securities = np . array ( [ tuple ( x . split ( \",\" )) for x in secs ], dtype = security_info_dtype ) # \u66f4\u65b0\u8bc1\u5238\u7c7b\u578b\u5217\u8868 cls . _securities = _securities cls . _security_types = set ( _securities [ \"type\" ]) cls . _stocks = _securities [ ( _securities [ \"type\" ] == \"stock\" ) | ( _securities [ \"type\" ] == \"index\" ) ] logger . 
info ( \" %d securities loaded, types: %s \" , len ( _securities ), cls . _security_types ) date_in_cache = await cache . security . get ( \"security:latest_date\" ) if date_in_cache is not None : cls . _securities_date = arrow . get ( date_in_cache ) . date () else : cls . _securities_date = datetime . date . today () return _securities else : # pragma: no cover return None @classmethod async def get_security_types ( cls ): if cls . _security_types : return list ( cls . _security_types ) else : return None @classmethod def get_stock ( cls , code ) -> NDArray [ security_info_dtype ]: \"\"\"\u6839\u636e`code`\u6765\u67e5\u627e\u5bf9\u5e94\u7684\u80a1\u7968\uff08\u542b\u6307\u6570\uff09\u5bf9\u8c61\u4fe1\u606f\u3002 \u5982\u679c\u60a8\u53ea\u6709\u80a1\u7968\u4ee3\u7801\uff0c\u60f3\u77e5\u9053\u8be5\u4ee3\u7801\u5bf9\u5e94\u7684\u80a1\u7968\u540d\u79f0\u3001\u522b\u540d\uff08\u663e\u793a\u540d\uff09\u3001\u4e0a\u5e02\u65e5\u671f\u7b49\u4fe1\u606f\uff0c\u5c31\u53ef\u4ee5\u4f7f\u7528\u6b64\u65b9\u6cd5\u6765\u83b7\u53d6\u76f8\u5173\u4fe1\u606f\u3002 \u8fd4\u56de\u7c7b\u578b\u4e3a`security_info_dtype`\u7684numpy\u6570\u7ec4\uff0c\u4f46\u4ec5\u5305\u542b\u4e00\u4e2a\u5143\u7d20\u3002\u60a8\u53ef\u4ee5\u8c61\u5b57\u5178\u4e00\u6837\u5b58\u53d6\u5b83\uff0c\u6bd4\u5982 ```python item = Security.get_stock(\"000001.XSHE\") print(item[\"alias\"]) ``` \u663e\u793a\u4e3a\"\u5e73\u5b89\u94f6\u884c\" Args: code: \u5f85\u67e5\u8be2\u7684\u80a1\u7968/\u6307\u6570\u4ee3\u7801 Returns: \u7c7b\u578b\u4e3a`security_info_dtype`\u7684numpy\u6570\u7ec4\uff0c\u4f46\u4ec5\u5305\u542b\u4e00\u4e2a\u5143\u7d20 \"\"\" if len ( cls . _securities ) == 0 : return None tmp = cls . _securities [ cls . _securities [ \"code\" ] == code ] if len ( tmp ) > 0 : if tmp [ \"type\" ] in [ \"stock\" , \"index\" ]: return tmp [ 0 ] return None @classmethod def fuzzy_match_ex ( cls , query : str ) -> Dict [ str , Tuple ]: # fixme: \u6b64\u65b9\u6cd5\u4e0eStock.fuzzy_match\u91cd\u590d\uff0c\u5e76\u4e14\u8fdb\u884c\u4e86\u7c7b\u578b\u9650\u5236\uff0c\u4f7f\u5f97\u5176\u4e0d\u9002\u5408\u653e\u5728Security\u91cc\uff0c\u4ee5\u53ca\u4f5c\u4e3a\u4e00\u4e2a\u901a\u7528\u65b9\u6cd5 query = query . upper () if re . match ( r \"\\d+\" , query ): return { sec [ \"code\" ]: sec . tolist () for sec in cls . _securities if sec [ \"code\" ] . find ( query ) != - 1 and sec [ \"type\" ] == \"stock\" } elif re . match ( r \"[A-Z]+\" , query ): return { sec [ \"code\" ]: sec . tolist () for sec in cls . _securities if sec [ \"name\" ] . startswith ( query ) and sec [ \"type\" ] == \"stock\" } else : return { sec [ \"code\" ]: sec . tolist () for sec in cls . _securities if sec [ \"alias\" ] . find ( query ) != - 1 and sec [ \"type\" ] == \"stock\" } @classmethod async def info ( cls , code , date = None ): _obj = await cls . query_security_via_date ( code , date ) if _obj is None : return None # \"_time\", \"code\", \"type\", \"alias\", \"end\", \"ipo\", \"name\" d1 = convert_nptime_to_datetime ( _obj [ \"ipo\" ]) . date () d2 = convert_nptime_to_datetime ( _obj [ \"end\" ]) . date () return { \"type\" : _obj [ \"type\" ], \"display_name\" : _obj [ \"alias\" ], \"alias\" : _obj [ \"alias\" ], \"end\" : d2 , \"start\" : d1 , \"name\" : _obj [ \"name\" ], } @classmethod async def name ( cls , code , date = None ): _security = await cls . query_security_via_date ( code , date ) if _security is None : return None return _security [ \"name\" ] @classmethod async def alias ( cls , code , date = None ): return await cls . 
display_name ( code , date ) @classmethod async def display_name ( cls , code , date = None ): _security = await cls . query_security_via_date ( code , date ) if _security is None : return None return _security [ \"alias\" ] @classmethod async def start_date ( cls , code , date = None ): _security = await cls . query_security_via_date ( code , date ) if _security is None : return None return convert_nptime_to_datetime ( _security [ \"ipo\" ]) . date () @classmethod async def end_date ( cls , code , date = None ): _security = await cls . query_security_via_date ( code , date ) if _security is None : return None return convert_nptime_to_datetime ( _security [ \"end\" ]) . date () @classmethod async def security_type ( cls , code , date = None ) -> SecurityType : _security = await cls . query_security_via_date ( code , date ) if _security is None : return None return _security [ \"type\" ] @classmethod async def query_security_via_date ( cls , code : str , date : datetime . date = None ): if date is None : # \u4ece\u5185\u5b58\u4e2d\u67e5\u627e\uff0c\u5982\u679c\u7f13\u5b58\u4e2d\u7684\u6570\u636e\u5df2\u66f4\u65b0\uff0c\u91cd\u65b0\u52a0\u8f7d\u5230\u5185\u5b58 date_in_cache = await cache . security . get ( \"security:latest_date\" ) if date_in_cache is not None : date = arrow . get ( date_in_cache ) . date () if date > cls . _securities_date : await cls . load_securities () results = cls . _securities [ cls . _securities [ \"code\" ] == code ] else : # \u4eceinfluxdb\u67e5\u627e date = tf . day_shift ( date , 0 ) results = await cls . load_securities_from_db ( date , code ) if results is not None and len ( results ) > 0 : return results [ 0 ] else : return None @classmethod def select ( cls , date : datetime . date = None ) -> Query : if date is None : return Query ( target_date = None ) else : return Query ( target_date = date ) @classmethod async def update_secs_cache ( cls , dt : datetime . date , securities : List [ Tuple ]): \"\"\"\u66f4\u65b0\u8bc1\u5238\u5217\u8868\u5230\u7f13\u5b58\u6570\u636e\u5e93\u4e2d Args: dt: \u8bc1\u5238\u5217\u8868\u5f52\u5c5e\u7684\u65e5\u671f securities: \u8bc1\u5238\u5217\u8868, \u5143\u7d20\u4e3a\u5143\u7ec4\uff0c\u5206\u522b\u4e3a\u4ee3\u7801\u3001\u522b\u540d\u3001\u540d\u79f0\u3001IPO\u65e5\u671f\u3001\u9000\u5e02\u65e5\u548c\u8bc1\u5238\u7c7b\u578b \"\"\" # stock: {'index', 'stock'} # funds: {'fjb', 'mmf', 'reits', 'fja', 'fjm'} # {'etf', 'lof'} key = \"security:all\" pipeline = cache . security . pipeline () pipeline . delete ( key ) for code , alias , name , start , end , _type in securities : pipeline . rpush ( key , f \" { code } , { alias } , { name } , { start } ,\" f \" { end } , { _type } \" ) await pipeline . execute () logger . info ( \"all securities saved to cache %s , %d secs\" , key , len ( securities )) # update latest date info await cache . security . set ( \"security:latest_date\" , dt . strftime ( \"%Y-%m- %d \" )) @classmethod async def save_securities ( cls , securities : List [ str ], dt : datetime . date ): \"\"\"\u4fdd\u5b58\u6307\u5b9a\u7684\u8bc1\u5238\u4fe1\u606f\u5230\u7f13\u5b58\u4e2d\uff0c\u5e76\u4e14\u5b58\u5165influxdb\uff0c\u5b9a\u65f6job\u8c03\u7528\u672c\u63a5\u53e3 Args: securities: \u8bc1\u5238\u4ee3\u7801\u5217\u8868\u3002 \"\"\" # stock: {'index', 'stock'} # funds: {'fjb', 'mmf', 'reits', 'fja', 'fjm'} # {'etf', 'lof'} if dt is None or len ( securities ) == 0 : return measurement = \"security_list\" client = get_influx_client () # code, alias, name, start, end, type security_list = np . 
array ( [ ( dt , x [ 0 ], f \" { x [ 0 ] } , { x [ 1 ] } , { x [ 2 ] } , { x [ 3 ] } , { x [ 4 ] } , { x [ 5 ] } \" ) for x in securities ], dtype = security_db_dtype , ) await client . save ( security_list , measurement , time_key = \"frame\" , tag_keys = [ \"code\" ] ) @classmethod async def load_securities_from_db ( cls , target_date : datetime . date , code : str = None ): if target_date is None : return None client = get_influx_client () measurement = \"security_list\" flux = ( Flux () . measurement ( measurement ) . range ( target_date , target_date ) . bucket ( client . _bucket ) . fields ([ \"info\" ]) ) if code is not None and len ( code ) > 0 : flux . tags ({ \"code\" : code }) data = await client . query ( flux ) if len ( data ) == 2 : # \\r\\n return None ds = DataframeDeserializer ( sort_values = \"_time\" , usecols = [ \"_time\" , \"code\" , \"info\" ], time_col = \"_time\" , engine = \"c\" , ) actual = ds ( data ) secs = actual . to_records ( index = False ) if len ( secs ) != 0 : # \"_time\", \"code\", \"code, alias, name, start, end, type\" _securities = np . array ( [ tuple ( x [ \"info\" ] . split ( \",\" )) for x in secs ], dtype = security_info_dtype ) return _securities else : return None @classmethod async def get_datescope_from_db ( cls ): # fixme: \u51fd\u6570\u540d\u65e0\u6cd5\u53cd\u6620\u7528\u9014\uff0c\u9700\u8981\u589e\u52a0\u6587\u6863\u6ce8\u91ca\uff0c\u8bf4\u660e\u8be5\u51fd\u6570\u7684\u4f5c\u7528,\u6216\u8005\u4e0d\u5e94\u8be5\u51fa\u73b0\u5728\u6b64\u7c7b\u4e2d\uff1f client = get_influx_client () measurement = \"security_list\" date1 = arrow . get ( \"2005-01-01\" ) . date () date2 = arrow . now () . naive . date () flux = ( Flux () . measurement ( measurement ) . range ( date1 , date2 ) . bucket ( client . _bucket ) . tags ({ \"code\" : \"000001.XSHE\" }) ) data = await client . query ( flux ) if len ( data ) == 2 : # \\r\\n return None , None ds = DataframeDeserializer ( sort_values = \"_time\" , usecols = [ \"_time\" ], time_col = \"_time\" , engine = \"c\" ) actual = ds ( data ) secs = actual . to_records ( index = False ) if len ( secs ) != 0 : d1 = convert_nptime_to_datetime ( secs [ 0 ][ \"_time\" ]) d2 = convert_nptime_to_datetime ( secs [ len ( secs ) - 1 ][ \"_time\" ]) return d1 . date (), d2 . date () else : return None , None @classmethod async def _notify_special_bonusnote ( cls , code , note , cancel_date ): # fixme: \u8fd9\u4e2a\u51fd\u6570\u5e94\u8be5\u51fa\u73b0\u5728omega\u4e2d\uff1f default_cancel_date = datetime . date ( 2099 , 1 , 1 ) # \u9ed8\u8ba4\u65e0\u53d6\u6d88\u516c\u544a # report this special event to notify user if cancel_date != default_cancel_date : ding ( \"security %s , bonus_cancel_pub_date %s \" % ( code , cancel_date )) if note . find ( \"\u6d41\u901a\" ) != - 1 : # \u68c0\u67e5\u662f\u5426\u6709\u201c\u6d41\u901a\u80a1\u201d\u6587\u5b57 ding ( \"security %s , special xrxd note: %s \" % ( code , note )) @classmethod async def save_xrxd_reports ( cls , reports : List [ str ], dt : datetime . date ): # fixme: \u6b64\u51fd\u6570\u5e94\u8be5\u5c5e\u4e8eomega? 
\"\"\"\u4fdd\u5b581\u5e74\u5185\u7684\u5206\u7ea2\u9001\u80a1\u4fe1\u606f\uff0c\u5e76\u4e14\u5b58\u5165influxdb\uff0c\u5b9a\u65f6job\u8c03\u7528\u672c\u63a5\u53e3 Args: reports: \u5206\u7ea2\u9001\u80a1\u516c\u544a \"\"\" # code(0), a_xr_date, board_plan_bonusnote, bonus_ratio_rmb(3), dividend_ratio, transfer_ratio(5), # at_bonus_ratio_rmb(6), report_date, plan_progress, implementation_bonusnote, bonus_cancel_pub_date(10) if len ( reports ) == 0 or dt is None : return # read reports from db and convert to dict map reports_in_db = {} dt_start = dt - datetime . timedelta ( days = 366 ) # \u5f80\u524d\u56de\u6eaf366\u5929 dt_end = dt + datetime . timedelta ( days = 366 ) # \u5f80\u540e\u5ef6\u957f366\u5929 existing_records = await cls . _load_xrxd_from_db ( None , dt_start , dt_end ) for record in existing_records : code = record [ 0 ] if code not in reports_in_db : reports_in_db [ code ] = [ record ] else : reports_in_db [ code ] . append ( record ) records = [] # \u51c6\u5907\u5199\u5165db for x in reports : code = x [ 0 ] note = x [ 2 ] cancel_date = x [ 10 ] existing_items = reports_in_db . get ( code , None ) if existing_items is None : # \u65b0\u8bb0\u5f55 record = ( x [ 1 ], x [ 0 ], f \" { x [ 0 ] } | { x [ 1 ] } | { x [ 2 ] } | { x [ 3 ] } | { x [ 4 ] } | { x [ 5 ] } | { x [ 6 ] } | { x [ 7 ] } | { x [ 8 ] } | { x [ 9 ] } | { x [ 10 ] } \" , ) records . append ( record ) await cls . _notify_special_bonusnote ( code , note , cancel_date ) else : new_record = True for item in existing_items : existing_date = convert_nptime_to_datetime ( item [ 1 ]) . date () if existing_date == x [ 1 ]: # \u5982\u679cxr_date\u76f8\u540c\uff0c\u4e0d\u66f4\u65b0 new_record = False continue if new_record : record = ( x [ 1 ], x [ 0 ], f \" { x [ 0 ] } | { x [ 1 ] } | { x [ 2 ] } | { x [ 3 ] } | { x [ 4 ] } | { x [ 5 ] } | { x [ 6 ] } | { x [ 7 ] } | { x [ 8 ] } | { x [ 9 ] } | { x [ 10 ] } \" , ) records . append ( record ) await cls . _notify_special_bonusnote ( code , note , cancel_date ) logger . info ( \"save_xrxd_reports, %d records to be saved\" , len ( records )) if len ( records ) == 0 : return measurement = \"security_xrxd_reports\" client = get_influx_client () # a_xr_date(_time), code(tag), info report_list = np . array ( records , dtype = security_db_dtype ) await client . save ( report_list , measurement , time_key = \"frame\" , tag_keys = [ \"code\" ]) @classmethod async def _load_xrxd_from_db ( cls , code , dt_start : datetime . date , dt_end : datetime . date ): if dt_start is None or dt_end is None : return [] client = get_influx_client () measurement = \"security_xrxd_reports\" flux = ( Flux () . measurement ( measurement ) . range ( dt_start , dt_end ) . bucket ( client . _bucket ) . fields ([ \"info\" ]) ) if code is not None and len ( code ) > 0 : flux . tags ({ \"code\" : code }) data = await client . query ( flux ) if len ( data ) == 2 : # \\r\\n return [] ds = DataframeDeserializer ( sort_values = \"_time\" , usecols = [ \"_time\" , \"code\" , \"info\" ], time_col = \"_time\" , engine = \"c\" , ) actual = ds ( data ) secs = actual . to_records ( index = False ) if len ( secs ) != 0 : _reports = np . array ( [ tuple ( x [ \"info\" ] . split ( \"|\" )) for x in secs ], dtype = xrxd_info_dtype ) return _reports else : return [] @classmethod async def get_xrxd_info ( cls , dt : datetime . 
date , code : str = None ): if dt is None : return None # code(0), a_xr_date, board_plan_bonusnote, bonus_ratio_rmb(3), dividend_ratio, transfer_ratio(5), # at_bonus_ratio_rmb(6), report_date, plan_progress, implementation_bonusnote, bonus_cancel_pub_date(10) reports = await cls . _load_xrxd_from_db ( code , dt , dt ) if len ( reports ) == 0 : return None readable_reports = [] for report in reports : xr_date = convert_nptime_to_datetime ( report [ 1 ]) . date () readable_reports . append ( { \"code\" : report [ 0 ], \"xr_date\" : xr_date , \"bonus\" : report [ 3 ], \"dividend\" : report [ 4 ], \"transfer\" : report [ 5 ], \"bonusnote\" : report [ 2 ], } ) return readable_reports get_stock ( code ) classmethod \u00b6 \u6839\u636e code \u6765\u67e5\u627e\u5bf9\u5e94\u7684\u80a1\u7968\uff08\u542b\u6307\u6570\uff09\u5bf9\u8c61\u4fe1\u606f\u3002 \u5982\u679c\u60a8\u53ea\u6709\u80a1\u7968\u4ee3\u7801\uff0c\u60f3\u77e5\u9053\u8be5\u4ee3\u7801\u5bf9\u5e94\u7684\u80a1\u7968\u540d\u79f0\u3001\u522b\u540d\uff08\u663e\u793a\u540d\uff09\u3001\u4e0a\u5e02\u65e5\u671f\u7b49\u4fe1\u606f\uff0c\u5c31\u53ef\u4ee5\u4f7f\u7528\u6b64\u65b9\u6cd5\u6765\u83b7\u53d6\u76f8\u5173\u4fe1\u606f\u3002 \u8fd4\u56de\u7c7b\u578b\u4e3a security_info_dtype \u7684numpy\u6570\u7ec4\uff0c\u4f46\u4ec5\u5305\u542b\u4e00\u4e2a\u5143\u7d20\u3002\u60a8\u53ef\u4ee5\u8c61\u5b57\u5178\u4e00\u6837\u5b58\u53d6\u5b83\uff0c\u6bd4\u5982 1 2 item = Security . get_stock ( \"000001.XSHE\" ) print ( item [ \"alias\" ]) \u663e\u793a\u4e3a\"\u5e73\u5b89\u94f6\u884c\" Parameters: Name Type Description Default code \u5f85\u67e5\u8be2\u7684\u80a1\u7968/\u6307\u6570\u4ee3\u7801 required Returns: Type Description numpy.ndarray[Any, numpy.dtype[[('code', 'O'), ('alias', 'O'), ('name', 'O'), ('ipo', 'datetime64[s]'), ('end', 'datetime64[s]'), ('type', 'O')]]] \u7c7b\u578b\u4e3a security_info_dtype \u7684numpy\u6570\u7ec4\uff0c\u4f46\u4ec5\u5305\u542b\u4e00\u4e2a\u5143\u7d20 Source code in omicron/models/security.py @classmethod def get_stock ( cls , code ) -> NDArray [ security_info_dtype ]: \"\"\"\u6839\u636e`code`\u6765\u67e5\u627e\u5bf9\u5e94\u7684\u80a1\u7968\uff08\u542b\u6307\u6570\uff09\u5bf9\u8c61\u4fe1\u606f\u3002 \u5982\u679c\u60a8\u53ea\u6709\u80a1\u7968\u4ee3\u7801\uff0c\u60f3\u77e5\u9053\u8be5\u4ee3\u7801\u5bf9\u5e94\u7684\u80a1\u7968\u540d\u79f0\u3001\u522b\u540d\uff08\u663e\u793a\u540d\uff09\u3001\u4e0a\u5e02\u65e5\u671f\u7b49\u4fe1\u606f\uff0c\u5c31\u53ef\u4ee5\u4f7f\u7528\u6b64\u65b9\u6cd5\u6765\u83b7\u53d6\u76f8\u5173\u4fe1\u606f\u3002 \u8fd4\u56de\u7c7b\u578b\u4e3a`security_info_dtype`\u7684numpy\u6570\u7ec4\uff0c\u4f46\u4ec5\u5305\u542b\u4e00\u4e2a\u5143\u7d20\u3002\u60a8\u53ef\u4ee5\u8c61\u5b57\u5178\u4e00\u6837\u5b58\u53d6\u5b83\uff0c\u6bd4\u5982 ```python item = Security.get_stock(\"000001.XSHE\") print(item[\"alias\"]) ``` \u663e\u793a\u4e3a\"\u5e73\u5b89\u94f6\u884c\" Args: code: \u5f85\u67e5\u8be2\u7684\u80a1\u7968/\u6307\u6570\u4ee3\u7801 Returns: \u7c7b\u578b\u4e3a`security_info_dtype`\u7684numpy\u6570\u7ec4\uff0c\u4f46\u4ec5\u5305\u542b\u4e00\u4e2a\u5143\u7d20 \"\"\" if len ( cls . _securities ) == 0 : return None tmp = cls . _securities [ cls . _securities [ \"code\" ] == code ] if len ( tmp ) > 0 : if tmp [ \"type\" ] in [ \"stock\" , \"index\" ]: return tmp [ 0 ] return None init () async classmethod \u00b6 \u521d\u59cb\u5316Security. 
\u4e00\u822c\u800c\u8a00\uff0comicron\u7684\u4f7f\u7528\u8005\u65e0\u987b\u8c03\u7528\u6b64\u65b9\u6cd5\uff0c\u5b83\u4f1a\u5728omicron\u521d\u59cb\u5316\uff08\u901a\u8fc7 omicron.init \uff09\u65f6\uff0c\u88ab\u81ea\u52a8\u8c03\u7528\u3002 Exceptions: Type Description DataNotReadyError \u5982\u679comicron\u672a\u521d\u59cb\u5316\uff0c\u6216\u8005cache\u4e2d\u672a\u52a0\u8f7d\u6700\u65b0\u8bc1\u5238\u5217\u8868\uff0c\u5219\u629b\u51fa\u6b64\u5f02\u5e38\u3002 Source code in omicron/models/security.py @classmethod async def init ( cls ): \"\"\"\u521d\u59cb\u5316Security. \u4e00\u822c\u800c\u8a00\uff0comicron\u7684\u4f7f\u7528\u8005\u65e0\u987b\u8c03\u7528\u6b64\u65b9\u6cd5\uff0c\u5b83\u4f1a\u5728omicron\u521d\u59cb\u5316\uff08\u901a\u8fc7`omicron.init`\uff09\u65f6\uff0c\u88ab\u81ea\u52a8\u8c03\u7528\u3002 Raises: DataNotReadyError: \u5982\u679comicron\u672a\u521d\u59cb\u5316\uff0c\u6216\u8005cache\u4e2d\u672a\u52a0\u8f7d\u6700\u65b0\u8bc1\u5238\u5217\u8868\uff0c\u5219\u629b\u51fa\u6b64\u5f02\u5e38\u3002 \"\"\" # read all securities from redis, 7111 records now # {'index', 'stock'} # {'fjb', 'mmf', 'reits', 'fja', 'fjm'} # {'etf', 'lof'} if len ( cls . _securities ) > 100 : return True secs = await cls . load_securities () if secs is None or len ( secs ) == 0 : # pragma: no cover raise DataNotReadyError ( \"No securities in cache, make sure you have called omicron.init() first.\" ) print ( \"init securities done\" ) return True load_securities () async classmethod \u00b6 \u52a0\u8f7d\u6240\u6709\u8bc1\u5238\u7684\u4fe1\u606f\uff0c\u5e76\u7f13\u5b58\u5230\u5185\u5b58\u4e2d \u4e00\u822c\u800c\u8a00\uff0comicron\u7684\u4f7f\u7528\u8005\u65e0\u987b\u8c03\u7528\u6b64\u65b9\u6cd5\uff0c\u5b83\u4f1a\u5728omicron\u521d\u59cb\u5316\uff08\u901a\u8fc7 omicron.init \uff09\u65f6\uff0c\u88ab\u81ea\u52a8\u8c03\u7528\u3002 Source code in omicron/models/security.py @classmethod async def load_securities ( cls ): \"\"\"\u52a0\u8f7d\u6240\u6709\u8bc1\u5238\u7684\u4fe1\u606f\uff0c\u5e76\u7f13\u5b58\u5230\u5185\u5b58\u4e2d \u4e00\u822c\u800c\u8a00\uff0comicron\u7684\u4f7f\u7528\u8005\u65e0\u987b\u8c03\u7528\u6b64\u65b9\u6cd5\uff0c\u5b83\u4f1a\u5728omicron\u521d\u59cb\u5316\uff08\u901a\u8fc7`omicron.init`\uff09\u65f6\uff0c\u88ab\u81ea\u52a8\u8c03\u7528\u3002 \"\"\" secs = await cache . security . lrange ( \"security:all\" , 0 , - 1 ) if len ( secs ) != 0 : # using np.datetime64[s] _securities = np . array ( [ tuple ( x . split ( \",\" )) for x in secs ], dtype = security_info_dtype ) # \u66f4\u65b0\u8bc1\u5238\u7c7b\u578b\u5217\u8868 cls . _securities = _securities cls . _security_types = set ( _securities [ \"type\" ]) cls . _stocks = _securities [ ( _securities [ \"type\" ] == \"stock\" ) | ( _securities [ \"type\" ] == \"index\" ) ] logger . info ( \" %d securities loaded, types: %s \" , len ( _securities ), cls . _security_types ) date_in_cache = await cache . security . get ( \"security:latest_date\" ) if date_in_cache is not None : cls . _securities_date = arrow . get ( date_in_cache ) . date () else : cls . _securities_date = datetime . date . 
today () return _securities else : # pragma: no cover return None save_securities ( securities , dt ) async classmethod \u00b6 \u4fdd\u5b58\u6307\u5b9a\u7684\u8bc1\u5238\u4fe1\u606f\u5230\u7f13\u5b58\u4e2d\uff0c\u5e76\u4e14\u5b58\u5165influxdb\uff0c\u5b9a\u65f6job\u8c03\u7528\u672c\u63a5\u53e3 Parameters: Name Type Description Default securities List[str] \u8bc1\u5238\u4ee3\u7801\u5217\u8868\u3002 required Source code in omicron/models/security.py @classmethod async def save_securities ( cls , securities : List [ str ], dt : datetime . date ): \"\"\"\u4fdd\u5b58\u6307\u5b9a\u7684\u8bc1\u5238\u4fe1\u606f\u5230\u7f13\u5b58\u4e2d\uff0c\u5e76\u4e14\u5b58\u5165influxdb\uff0c\u5b9a\u65f6job\u8c03\u7528\u672c\u63a5\u53e3 Args: securities: \u8bc1\u5238\u4ee3\u7801\u5217\u8868\u3002 \"\"\" # stock: {'index', 'stock'} # funds: {'fjb', 'mmf', 'reits', 'fja', 'fjm'} # {'etf', 'lof'} if dt is None or len ( securities ) == 0 : return measurement = \"security_list\" client = get_influx_client () # code, alias, name, start, end, type security_list = np . array ( [ ( dt , x [ 0 ], f \" { x [ 0 ] } , { x [ 1 ] } , { x [ 2 ] } , { x [ 3 ] } , { x [ 4 ] } , { x [ 5 ] } \" ) for x in securities ], dtype = security_db_dtype , ) await client . save ( security_list , measurement , time_key = \"frame\" , tag_keys = [ \"code\" ] ) save_xrxd_reports ( reports , dt ) async classmethod \u00b6 \u4fdd\u5b581\u5e74\u5185\u7684\u5206\u7ea2\u9001\u80a1\u4fe1\u606f\uff0c\u5e76\u4e14\u5b58\u5165influxdb\uff0c\u5b9a\u65f6job\u8c03\u7528\u672c\u63a5\u53e3 Parameters: Name Type Description Default reports List[str] \u5206\u7ea2\u9001\u80a1\u516c\u544a required Source code in omicron/models/security.py @classmethod async def save_xrxd_reports ( cls , reports : List [ str ], dt : datetime . date ): # fixme: \u6b64\u51fd\u6570\u5e94\u8be5\u5c5e\u4e8eomega? \"\"\"\u4fdd\u5b581\u5e74\u5185\u7684\u5206\u7ea2\u9001\u80a1\u4fe1\u606f\uff0c\u5e76\u4e14\u5b58\u5165influxdb\uff0c\u5b9a\u65f6job\u8c03\u7528\u672c\u63a5\u53e3 Args: reports: \u5206\u7ea2\u9001\u80a1\u516c\u544a \"\"\" # code(0), a_xr_date, board_plan_bonusnote, bonus_ratio_rmb(3), dividend_ratio, transfer_ratio(5), # at_bonus_ratio_rmb(6), report_date, plan_progress, implementation_bonusnote, bonus_cancel_pub_date(10) if len ( reports ) == 0 or dt is None : return # read reports from db and convert to dict map reports_in_db = {} dt_start = dt - datetime . timedelta ( days = 366 ) # \u5f80\u524d\u56de\u6eaf366\u5929 dt_end = dt + datetime . timedelta ( days = 366 ) # \u5f80\u540e\u5ef6\u957f366\u5929 existing_records = await cls . _load_xrxd_from_db ( None , dt_start , dt_end ) for record in existing_records : code = record [ 0 ] if code not in reports_in_db : reports_in_db [ code ] = [ record ] else : reports_in_db [ code ] . append ( record ) records = [] # \u51c6\u5907\u5199\u5165db for x in reports : code = x [ 0 ] note = x [ 2 ] cancel_date = x [ 10 ] existing_items = reports_in_db . get ( code , None ) if existing_items is None : # \u65b0\u8bb0\u5f55 record = ( x [ 1 ], x [ 0 ], f \" { x [ 0 ] } | { x [ 1 ] } | { x [ 2 ] } | { x [ 3 ] } | { x [ 4 ] } | { x [ 5 ] } | { x [ 6 ] } | { x [ 7 ] } | { x [ 8 ] } | { x [ 9 ] } | { x [ 10 ] } \" , ) records . append ( record ) await cls . _notify_special_bonusnote ( code , note , cancel_date ) else : new_record = True for item in existing_items : existing_date = convert_nptime_to_datetime ( item [ 1 ]) . 
date () if existing_date == x [ 1 ]: # \u5982\u679cxr_date\u76f8\u540c\uff0c\u4e0d\u66f4\u65b0 new_record = False continue if new_record : record = ( x [ 1 ], x [ 0 ], f \" { x [ 0 ] } | { x [ 1 ] } | { x [ 2 ] } | { x [ 3 ] } | { x [ 4 ] } | { x [ 5 ] } | { x [ 6 ] } | { x [ 7 ] } | { x [ 8 ] } | { x [ 9 ] } | { x [ 10 ] } \" , ) records . append ( record ) await cls . _notify_special_bonusnote ( code , note , cancel_date ) logger . info ( \"save_xrxd_reports, %d records to be saved\" , len ( records )) if len ( records ) == 0 : return measurement = \"security_xrxd_reports\" client = get_influx_client () # a_xr_date(_time), code(tag), info report_list = np . array ( records , dtype = security_db_dtype ) await client . save ( report_list , measurement , time_key = \"frame\" , tag_keys = [ \"code\" ]) update_secs_cache ( dt , securities ) async classmethod \u00b6 \u66f4\u65b0\u8bc1\u5238\u5217\u8868\u5230\u7f13\u5b58\u6570\u636e\u5e93\u4e2d Parameters: Name Type Description Default dt date \u8bc1\u5238\u5217\u8868\u5f52\u5c5e\u7684\u65e5\u671f required securities List[Tuple] \u8bc1\u5238\u5217\u8868, \u5143\u7d20\u4e3a\u5143\u7ec4\uff0c\u5206\u522b\u4e3a\u4ee3\u7801\u3001\u522b\u540d\u3001\u540d\u79f0\u3001IPO\u65e5\u671f\u3001\u9000\u5e02\u65e5\u548c\u8bc1\u5238\u7c7b\u578b required Source code in omicron/models/security.py @classmethod async def update_secs_cache ( cls , dt : datetime . date , securities : List [ Tuple ]): \"\"\"\u66f4\u65b0\u8bc1\u5238\u5217\u8868\u5230\u7f13\u5b58\u6570\u636e\u5e93\u4e2d Args: dt: \u8bc1\u5238\u5217\u8868\u5f52\u5c5e\u7684\u65e5\u671f securities: \u8bc1\u5238\u5217\u8868, \u5143\u7d20\u4e3a\u5143\u7ec4\uff0c\u5206\u522b\u4e3a\u4ee3\u7801\u3001\u522b\u540d\u3001\u540d\u79f0\u3001IPO\u65e5\u671f\u3001\u9000\u5e02\u65e5\u548c\u8bc1\u5238\u7c7b\u578b \"\"\" # stock: {'index', 'stock'} # funds: {'fjb', 'mmf', 'reits', 'fja', 'fjm'} # {'etf', 'lof'} key = \"security:all\" pipeline = cache . security . pipeline () pipeline . delete ( key ) for code , alias , name , start , end , _type in securities : pipeline . rpush ( key , f \" { code } , { alias } , { name } , { start } ,\" f \" { end } , { _type } \" ) await pipeline . execute () logger . info ( \"all securities saved to cache %s , %d secs\" , key , len ( securities )) # update latest date info await cache . security . set ( \"security:latest_date\" , dt . strftime ( \"%Y-%m- %d \" ))","title":"security"},{"location":"api/security/#omicron.models.security.Query","text":"\u8bc1\u5238\u4fe1\u606f\u67e5\u8be2\u5bf9\u8c61 \u8bc1\u5238\u4fe1\u606f\u67e5\u8be2\u5bf9\u8c61\uff0c\u7531 Security.select() \u65b9\u6cd5\u751f\u6210\uff0c\u652f\u6301\u94fe\u5f0f\u67e5\u8be2\u3002\u901a\u8fc7 eval \u51fd\u6570\u7ed3\u675f\u94fe\u5f0f\u8c03\u7528\u5e76\u751f\u6210\u67e5\u8be2\u7ed3\u679c\u3002 Source code in omicron/models/security.py class Query : \"\"\"\u8bc1\u5238\u4fe1\u606f\u67e5\u8be2\u5bf9\u8c61 \u8bc1\u5238\u4fe1\u606f\u67e5\u8be2\u5bf9\u8c61\uff0c\u7531`Security.select()`\u65b9\u6cd5\u751f\u6210\uff0c\u652f\u6301\u94fe\u5f0f\u67e5\u8be2\u3002\u901a\u8fc7`eval`\u51fd\u6570\u7ed3\u675f\u94fe\u5f0f\u8c03\u7528\u5e76\u751f\u6210\u67e5\u8be2\u7ed3\u679c\u3002 \"\"\" def __init__ ( self , target_date : datetime . date = None ): if target_date is None : # \u805a\u5bbd\u4e0d\u4e00\u5b9a\u4f1a\u53ca\u65f6\u66f4\u65b0\u6570\u636e\uff0c\u56e0\u6b64db\u4e2d\u4e0d\u5b58\u653e\u5f53\u5929\u7684\u6570\u636e\uff0c\u5982\u679c\u4f20\u7a7a\uff0c\u67e5cache self . 
target_date = None else : # \u5982\u679c\u662f\u4ea4\u6613\u65e5\uff0c\u53d6\u5f53\u5929\uff0c\u5426\u5219\u53d6\u524d\u4e00\u5929 self . target_date = tf . day_shift ( target_date , 0 ) # \u540d\u5b57\uff0c\u663e\u793a\u540d\uff0c\u7c7b\u578b\u8fc7\u6ee4\u5668 self . _name_pattern = None # \u5b57\u6bcd\u540d\u5b57 self . _alias_pattern = None # \u663e\u793a\u540d self . _type_pattern = None # \u4e0d\u6307\u5b9a\u5219\u9ed8\u8ba4\u4e3a\u5168\u90e8\uff0c\u5982\u679c\u4f20\u5165\u7a7a\u503c\u5219\u53ea\u9009\u62e9\u80a1\u7968\u548c\u6307\u6570 # \u5f00\u5173\u9009\u9879 self . _exclude_kcb = False # \u79d1\u521b\u677f self . _exclude_cyb = False # \u521b\u4e1a\u677f self . _exclude_st = False # ST self . _include_exit = False # \u662f\u5426\u5305\u542b\u5df2\u9000\u5e02\u8bc1\u5238(\u9ed8\u8ba4\u4e0d\u5305\u62ec\u5f53\u5929\u9000\u5e02\u7684) # \u4e0b\u5217\u5f00\u5173\u4f18\u5148\u7ea7\u9ad8\u4e8e\u4e0a\u9762\u7684 self . _only_kcb = False self . _only_cyb = False self . _only_st = False def only_cyb ( self ) -> \"Query\" : \"\"\"\u8fd4\u56de\u7ed3\u679c\u4e2d\u53ea\u5305\u542b\u521b\u4e1a\u677f\u80a1\u7968\"\"\" self . _only_cyb = True # \u9ad8\u4f18\u5148\u7ea7 self . _exclude_cyb = False self . _only_kcb = False self . _only_st = False return self def only_st ( self ) -> \"Query\" : \"\"\"\u8fd4\u56de\u7ed3\u679c\u4e2d\u53ea\u5305\u542bST\u7c7b\u578b\u7684\u8bc1\u5238\"\"\" self . _only_st = True # \u9ad8\u4f18\u5148\u7ea7 self . _exclude_st = False self . _only_kcb = False self . _only_cyb = False return self def only_kcb ( self ) -> \"Query\" : \"\"\"\u8fd4\u56de\u7ed3\u679c\u4e2d\u53ea\u5305\u542b\u79d1\u521b\u677f\u80a1\u7968\"\"\" self . _only_kcb = True # \u9ad8\u4f18\u5148\u7ea7 self . _exclude_kcb = False self . _only_cyb = False self . _only_st = False return self def exclude_st ( self ) -> \"Query\" : \"\"\"\u4ece\u8fd4\u56de\u7ed3\u679c\u4e2d\u6392\u9664ST\u7c7b\u578b\u7684\u80a1\u7968\"\"\" self . _exclude_st = True self . _only_st = False return self def exclude_cyb ( self ) -> \"Query\" : \"\"\"\u4ece\u8fd4\u56de\u7ed3\u679c\u4e2d\u6392\u9664\u521b\u4e1a\u677f\u7c7b\u578b\u7684\u80a1\u7968\"\"\" self . _exclude_cyb = True self . _only_cyb = False return self def exclude_kcb ( self ) -> \"Query\" : \"\"\"\u4ece\u8fd4\u56de\u7ed3\u679c\u4e2d\u6392\u9664\u79d1\u521b\u677f\u7c7b\u578b\u7684\u80a1\u7968\"\"\" self . _exclude_kcb = True self . _only_kcb = False return self def include_exit ( self ) -> \"Query\" : \"\"\"\u4ece\u8fd4\u56de\u7ed3\u679c\u4e2d\u5305\u542b\u5df2\u9000\u5e02\u7684\u8bc1\u5238\"\"\" self . _include_exit = True return self def types ( self , types : List [ str ]) -> \"Query\" : \"\"\"\u9009\u62e9\u7c7b\u578b\u5728`types`\u4e2d\u7684\u8bc1\u5238\u54c1\u79cd Args: types: \u6709\u6548\u7684\u7c7b\u578b\u5305\u62ec: \u5bf9\u80a1\u7968\u6307\u6570\u800c\u8a00\u662f\uff08'index', 'stock'\uff09\uff0c\u5bf9\u57fa\u91d1\u800c\u8a00\u5219\u662f\uff08'etf', 'fjb', 'mmf', 'reits', 'fja', 'fjm', 'lof'\uff09 \"\"\" if types is None or isinstance ( types , List ) is False : return self if len ( types ) == 0 : self . _type_pattern = [ \"index\" , \"stock\" ] else : tmp = set ( types ) self . 
_type_pattern = list ( tmp ) return self def name_like ( self , name : str ) -> \"Query\" : \"\"\"\u67e5\u627e\u80a1\u7968/\u8bc1\u5238\u540d\u79f0\u4e2d\u51fa\u73b0`name`\u7684\u54c1\u79cd \u6ce8\u610f\u8fd9\u91cc\u7684\u8bc1\u5238\u540d\u79f0\u5e76\u4e0d\u662f\u5176\u663e\u793a\u540d\u3002\u6bd4\u5982\u5bf9\u4e2d\u56fd\u5e73\u5b89000001.XSHE\u6765\u8bf4\uff0c\u5b83\u7684\u540d\u79f0\u662fZGPA\uff0c\u800c\u4e0d\u662f\u201c\u4e2d\u56fd\u5e73\u5b89\u201d\u3002 Args: name: \u5f85\u67e5\u627e\u7684\u540d\u5b57\uff0c\u6bd4\u5982\"ZGPA\" \"\"\" if name is None or len ( name ) == 0 : self . _name_pattern = None else : self . _name_pattern = name return self def alias_like ( self , display_name : str ) -> \"Query\" : \"\"\"\u67e5\u627e\u80a1\u7968/\u8bc1\u5238\u663e\u793a\u540d\u4e2d\u51fa\u73b0`display_name\u7684\u54c1\u79cd Args: display_name: \u663e\u793a\u540d\uff0c\u6bd4\u5982\u201c\u4e2d\u56fd\u5e73\u5b89\" \"\"\" if display_name is None or len ( display_name ) == 0 : self . _alias_pattern = None else : self . _alias_pattern = display_name return self async def eval ( self ) -> List [ str ]: \"\"\"\u5bf9\u67e5\u8be2\u7ed3\u679c\u8fdb\u884c\u6c42\u503c\uff0c\u8fd4\u56decode\u5217\u8868 Returns: \u4ee3\u7801\u5217\u8868 \"\"\" logger . debug ( \"eval, date: %s \" , self . target_date ) logger . debug ( \"eval, names and types: %s , %s , %s \" , self . _name_pattern , self . _alias_pattern , self . _type_pattern , ) logger . debug ( \"eval, exclude and include: %s , %s , %s , %s \" , self . _exclude_cyb , self . _exclude_st , self . _exclude_kcb , self . _include_exit , ) logger . debug ( \"eval, only: %s , %s , %s \" , self . _only_cyb , self . _only_st , self . _only_kcb ) date_in_cache = await cache . security . get ( \"security:latest_date\" ) if date_in_cache : # \u65e0\u6b64\u6570\u636e\u8bf4\u660eomega\u6709\u67d0\u4e9b\u95ee\u9898\uff0c\u4e0d\u5904\u7406 _date = arrow . get ( date_in_cache ) . date () else : now = datetime . datetime . now () _date = tf . day_shift ( now , 0 ) # \u786e\u5b9a\u6570\u636e\u6e90\uff0ccache\u4e3a\u5f53\u59298\u70b9\u4e4b\u540e\u83b7\u53d6\u7684\u6570\u636e\uff0c\u6570\u636e\u5e93\u5b58\u653e\u524d\u4e00\u65e5\u548c\u66f4\u65e9\u7684\u6570\u636e if not self . target_date or self . target_date >= _date : self . target_date = _date records = None if self . target_date == _date : # \u4ece\u5185\u5b58\u4e2d\u67e5\u627e\uff0c\u5982\u679c\u7f13\u5b58\u4e2d\u7684\u6570\u636e\u5df2\u66f4\u65b0\uff0c\u91cd\u65b0\u52a0\u8f7d\u5230\u5185\u5b58 secs = await cache . security . lrange ( \"security:all\" , 0 , - 1 ) if len ( secs ) != 0 : # using np.datetime64[s] records = np . array ( [ tuple ( x . split ( \",\" )) for x in secs ], dtype = security_info_dtype ) else : records = await Security . load_securities_from_db ( self . target_date ) if records is None : return None results = [] for record in records : if self . _type_pattern is not None : if record [ \"type\" ] not in self . _type_pattern : continue if self . _name_pattern is not None : if record [ \"name\" ] . find ( self . _name_pattern ) == - 1 : continue if self . _alias_pattern is not None : if record [ \"alias\" ] . find ( self . _alias_pattern ) == - 1 : continue # \u521b\u4e1a\u677f\uff0c\u79d1\u521b\u677f\uff0cST\u6682\u65f6\u9650\u5b9a\u4e3a\u80a1\u7968\u7c7b\u578b if self . _only_cyb : if record [ \"type\" ] != \"stock\" or not ( record [ \"code\" ][: 3 ] in ( \"300\" , \"301\" ) ): continue if self . _only_kcb : if ( record [ \"type\" ] != \"stock\" or record [ \"code\" ] . 
startswith ( \"688\" ) is False ): continue if self . _only_st : if record [ \"type\" ] != \"stock\" or record [ \"alias\" ] . find ( \"ST\" ) == - 1 : continue if self . _exclude_cyb : if record [ \"type\" ] == \"stock\" and record [ \"code\" ][: 3 ] in ( \"300\" , \"301\" ): continue if self . _exclude_st : if record [ \"type\" ] == \"stock\" and record [ \"alias\" ] . find ( \"ST\" ) != - 1 : continue if self . _exclude_kcb : if record [ \"type\" ] == \"stock\" and record [ \"code\" ] . startswith ( \"688\" ): continue # \u9000\u5e02\u6682\u4e0d\u9650\u5b9a\u662f\u5426\u4e3a\u80a1\u7968 if self . _include_exit is False : d1 = convert_nptime_to_datetime ( record [ \"end\" ]) . date () if d1 < self . target_date : continue results . append ( record [ \"code\" ]) # \u8fd4\u56de\u6240\u6709\u67e5\u8be2\u5230\u7684\u7ed3\u679c return results","title":"Query"},{"location":"api/security/#omicron.models.security.Query.alias_like","text":"\u67e5\u627e\u80a1\u7968/\u8bc1\u5238\u663e\u793a\u540d\u4e2d\u51fa\u73b0`display_name\u7684\u54c1\u79cd Parameters: Name Type Description Default display_name str \u663e\u793a\u540d\uff0c\u6bd4\u5982\u201c\u4e2d\u56fd\u5e73\u5b89\" required Source code in omicron/models/security.py def alias_like ( self , display_name : str ) -> \"Query\" : \"\"\"\u67e5\u627e\u80a1\u7968/\u8bc1\u5238\u663e\u793a\u540d\u4e2d\u51fa\u73b0`display_name\u7684\u54c1\u79cd Args: display_name: \u663e\u793a\u540d\uff0c\u6bd4\u5982\u201c\u4e2d\u56fd\u5e73\u5b89\" \"\"\" if display_name is None or len ( display_name ) == 0 : self . _alias_pattern = None else : self . _alias_pattern = display_name return self","title":"alias_like()"},{"location":"api/security/#omicron.models.security.Query.eval","text":"\u5bf9\u67e5\u8be2\u7ed3\u679c\u8fdb\u884c\u6c42\u503c\uff0c\u8fd4\u56decode\u5217\u8868 Returns: Type Description List[str] \u4ee3\u7801\u5217\u8868 Source code in omicron/models/security.py async def eval ( self ) -> List [ str ]: \"\"\"\u5bf9\u67e5\u8be2\u7ed3\u679c\u8fdb\u884c\u6c42\u503c\uff0c\u8fd4\u56decode\u5217\u8868 Returns: \u4ee3\u7801\u5217\u8868 \"\"\" logger . debug ( \"eval, date: %s \" , self . target_date ) logger . debug ( \"eval, names and types: %s , %s , %s \" , self . _name_pattern , self . _alias_pattern , self . _type_pattern , ) logger . debug ( \"eval, exclude and include: %s , %s , %s , %s \" , self . _exclude_cyb , self . _exclude_st , self . _exclude_kcb , self . _include_exit , ) logger . debug ( \"eval, only: %s , %s , %s \" , self . _only_cyb , self . _only_st , self . _only_kcb ) date_in_cache = await cache . security . get ( \"security:latest_date\" ) if date_in_cache : # \u65e0\u6b64\u6570\u636e\u8bf4\u660eomega\u6709\u67d0\u4e9b\u95ee\u9898\uff0c\u4e0d\u5904\u7406 _date = arrow . get ( date_in_cache ) . date () else : now = datetime . datetime . now () _date = tf . day_shift ( now , 0 ) # \u786e\u5b9a\u6570\u636e\u6e90\uff0ccache\u4e3a\u5f53\u59298\u70b9\u4e4b\u540e\u83b7\u53d6\u7684\u6570\u636e\uff0c\u6570\u636e\u5e93\u5b58\u653e\u524d\u4e00\u65e5\u548c\u66f4\u65e9\u7684\u6570\u636e if not self . target_date or self . target_date >= _date : self . target_date = _date records = None if self . target_date == _date : # \u4ece\u5185\u5b58\u4e2d\u67e5\u627e\uff0c\u5982\u679c\u7f13\u5b58\u4e2d\u7684\u6570\u636e\u5df2\u66f4\u65b0\uff0c\u91cd\u65b0\u52a0\u8f7d\u5230\u5185\u5b58 secs = await cache . security . lrange ( \"security:all\" , 0 , - 1 ) if len ( secs ) != 0 : # using np.datetime64[s] records = np . array ( [ tuple ( x . 
split ( \",\" )) for x in secs ], dtype = security_info_dtype ) else : records = await Security . load_securities_from_db ( self . target_date ) if records is None : return None results = [] for record in records : if self . _type_pattern is not None : if record [ \"type\" ] not in self . _type_pattern : continue if self . _name_pattern is not None : if record [ \"name\" ] . find ( self . _name_pattern ) == - 1 : continue if self . _alias_pattern is not None : if record [ \"alias\" ] . find ( self . _alias_pattern ) == - 1 : continue # \u521b\u4e1a\u677f\uff0c\u79d1\u521b\u677f\uff0cST\u6682\u65f6\u9650\u5b9a\u4e3a\u80a1\u7968\u7c7b\u578b if self . _only_cyb : if record [ \"type\" ] != \"stock\" or not ( record [ \"code\" ][: 3 ] in ( \"300\" , \"301\" ) ): continue if self . _only_kcb : if ( record [ \"type\" ] != \"stock\" or record [ \"code\" ] . startswith ( \"688\" ) is False ): continue if self . _only_st : if record [ \"type\" ] != \"stock\" or record [ \"alias\" ] . find ( \"ST\" ) == - 1 : continue if self . _exclude_cyb : if record [ \"type\" ] == \"stock\" and record [ \"code\" ][: 3 ] in ( \"300\" , \"301\" ): continue if self . _exclude_st : if record [ \"type\" ] == \"stock\" and record [ \"alias\" ] . find ( \"ST\" ) != - 1 : continue if self . _exclude_kcb : if record [ \"type\" ] == \"stock\" and record [ \"code\" ] . startswith ( \"688\" ): continue # \u9000\u5e02\u6682\u4e0d\u9650\u5b9a\u662f\u5426\u4e3a\u80a1\u7968 if self . _include_exit is False : d1 = convert_nptime_to_datetime ( record [ \"end\" ]) . date () if d1 < self . target_date : continue results . append ( record [ \"code\" ]) # \u8fd4\u56de\u6240\u6709\u67e5\u8be2\u5230\u7684\u7ed3\u679c return results","title":"eval()"},{"location":"api/security/#omicron.models.security.Query.exclude_cyb","text":"\u4ece\u8fd4\u56de\u7ed3\u679c\u4e2d\u6392\u9664\u521b\u4e1a\u677f\u7c7b\u578b\u7684\u80a1\u7968 Source code in omicron/models/security.py def exclude_cyb ( self ) -> \"Query\" : \"\"\"\u4ece\u8fd4\u56de\u7ed3\u679c\u4e2d\u6392\u9664\u521b\u4e1a\u677f\u7c7b\u578b\u7684\u80a1\u7968\"\"\" self . _exclude_cyb = True self . _only_cyb = False return self","title":"exclude_cyb()"},{"location":"api/security/#omicron.models.security.Query.exclude_kcb","text":"\u4ece\u8fd4\u56de\u7ed3\u679c\u4e2d\u6392\u9664\u79d1\u521b\u677f\u7c7b\u578b\u7684\u80a1\u7968 Source code in omicron/models/security.py def exclude_kcb ( self ) -> \"Query\" : \"\"\"\u4ece\u8fd4\u56de\u7ed3\u679c\u4e2d\u6392\u9664\u79d1\u521b\u677f\u7c7b\u578b\u7684\u80a1\u7968\"\"\" self . _exclude_kcb = True self . _only_kcb = False return self","title":"exclude_kcb()"},{"location":"api/security/#omicron.models.security.Query.exclude_st","text":"\u4ece\u8fd4\u56de\u7ed3\u679c\u4e2d\u6392\u9664ST\u7c7b\u578b\u7684\u80a1\u7968 Source code in omicron/models/security.py def exclude_st ( self ) -> \"Query\" : \"\"\"\u4ece\u8fd4\u56de\u7ed3\u679c\u4e2d\u6392\u9664ST\u7c7b\u578b\u7684\u80a1\u7968\"\"\" self . _exclude_st = True self . _only_st = False return self","title":"exclude_st()"},{"location":"api/security/#omicron.models.security.Query.include_exit","text":"\u4ece\u8fd4\u56de\u7ed3\u679c\u4e2d\u5305\u542b\u5df2\u9000\u5e02\u7684\u8bc1\u5238 Source code in omicron/models/security.py def include_exit ( self ) -> \"Query\" : \"\"\"\u4ece\u8fd4\u56de\u7ed3\u679c\u4e2d\u5305\u542b\u5df2\u9000\u5e02\u7684\u8bc1\u5238\"\"\" self . 
_include_exit = True return self","title":"include_exit()"},{"location":"api/security/#omicron.models.security.Query.name_like","text":"\u67e5\u627e\u80a1\u7968/\u8bc1\u5238\u540d\u79f0\u4e2d\u51fa\u73b0 name \u7684\u54c1\u79cd \u6ce8\u610f\u8fd9\u91cc\u7684\u8bc1\u5238\u540d\u79f0\u5e76\u4e0d\u662f\u5176\u663e\u793a\u540d\u3002\u6bd4\u5982\u5bf9\u4e2d\u56fd\u5e73\u5b89000001.XSHE\u6765\u8bf4\uff0c\u5b83\u7684\u540d\u79f0\u662fZGPA\uff0c\u800c\u4e0d\u662f\u201c\u4e2d\u56fd\u5e73\u5b89\u201d\u3002 Parameters: Name Type Description Default name str \u5f85\u67e5\u627e\u7684\u540d\u5b57\uff0c\u6bd4\u5982\"ZGPA\" required Source code in omicron/models/security.py def name_like ( self , name : str ) -> \"Query\" : \"\"\"\u67e5\u627e\u80a1\u7968/\u8bc1\u5238\u540d\u79f0\u4e2d\u51fa\u73b0`name`\u7684\u54c1\u79cd \u6ce8\u610f\u8fd9\u91cc\u7684\u8bc1\u5238\u540d\u79f0\u5e76\u4e0d\u662f\u5176\u663e\u793a\u540d\u3002\u6bd4\u5982\u5bf9\u4e2d\u56fd\u5e73\u5b89000001.XSHE\u6765\u8bf4\uff0c\u5b83\u7684\u540d\u79f0\u662fZGPA\uff0c\u800c\u4e0d\u662f\u201c\u4e2d\u56fd\u5e73\u5b89\u201d\u3002 Args: name: \u5f85\u67e5\u627e\u7684\u540d\u5b57\uff0c\u6bd4\u5982\"ZGPA\" \"\"\" if name is None or len ( name ) == 0 : self . _name_pattern = None else : self . _name_pattern = name return self","title":"name_like()"},{"location":"api/security/#omicron.models.security.Query.only_cyb","text":"\u8fd4\u56de\u7ed3\u679c\u4e2d\u53ea\u5305\u542b\u521b\u4e1a\u677f\u80a1\u7968 Source code in omicron/models/security.py def only_cyb ( self ) -> \"Query\" : \"\"\"\u8fd4\u56de\u7ed3\u679c\u4e2d\u53ea\u5305\u542b\u521b\u4e1a\u677f\u80a1\u7968\"\"\" self . _only_cyb = True # \u9ad8\u4f18\u5148\u7ea7 self . _exclude_cyb = False self . _only_kcb = False self . _only_st = False return self","title":"only_cyb()"},{"location":"api/security/#omicron.models.security.Query.only_kcb","text":"\u8fd4\u56de\u7ed3\u679c\u4e2d\u53ea\u5305\u542b\u79d1\u521b\u677f\u80a1\u7968 Source code in omicron/models/security.py def only_kcb ( self ) -> \"Query\" : \"\"\"\u8fd4\u56de\u7ed3\u679c\u4e2d\u53ea\u5305\u542b\u79d1\u521b\u677f\u80a1\u7968\"\"\" self . _only_kcb = True # \u9ad8\u4f18\u5148\u7ea7 self . _exclude_kcb = False self . _only_cyb = False self . _only_st = False return self","title":"only_kcb()"},{"location":"api/security/#omicron.models.security.Query.only_st","text":"\u8fd4\u56de\u7ed3\u679c\u4e2d\u53ea\u5305\u542bST\u7c7b\u578b\u7684\u8bc1\u5238 Source code in omicron/models/security.py def only_st ( self ) -> \"Query\" : \"\"\"\u8fd4\u56de\u7ed3\u679c\u4e2d\u53ea\u5305\u542bST\u7c7b\u578b\u7684\u8bc1\u5238\"\"\" self . _only_st = True # \u9ad8\u4f18\u5148\u7ea7 self . _exclude_st = False self . _only_kcb = False self . 
_only_cyb = False return self","title":"only_st()"},{"location":"api/security/#omicron.models.security.Query.types","text":"\u9009\u62e9\u7c7b\u578b\u5728 types \u4e2d\u7684\u8bc1\u5238\u54c1\u79cd Parameters: Name Type Description Default types List[str] \u6709\u6548\u7684\u7c7b\u578b\u5305\u62ec: \u5bf9\u80a1\u7968\u6307\u6570\u800c\u8a00\u662f\uff08'index', 'stock'\uff09\uff0c\u5bf9\u57fa\u91d1\u800c\u8a00\u5219\u662f\uff08'etf', 'fjb', 'mmf', 'reits', 'fja', 'fjm', 'lof'\uff09 required Source code in omicron/models/security.py def types ( self , types : List [ str ]) -> \"Query\" : \"\"\"\u9009\u62e9\u7c7b\u578b\u5728`types`\u4e2d\u7684\u8bc1\u5238\u54c1\u79cd Args: types: \u6709\u6548\u7684\u7c7b\u578b\u5305\u62ec: \u5bf9\u80a1\u7968\u6307\u6570\u800c\u8a00\u662f\uff08'index', 'stock'\uff09\uff0c\u5bf9\u57fa\u91d1\u800c\u8a00\u5219\u662f\uff08'etf', 'fjb', 'mmf', 'reits', 'fja', 'fjm', 'lof'\uff09 \"\"\" if types is None or isinstance ( types , List ) is False : return self if len ( types ) == 0 : self . _type_pattern = [ \"index\" , \"stock\" ] else : tmp = set ( types ) self . _type_pattern = list ( tmp ) return self","title":"types()"},{"location":"api/security/#omicron.models.security.Security","text":"Source code in omicron/models/security.py class Security : _securities = [] _securities_date = None _security_types = set () _stocks = [] @classmethod async def init ( cls ): \"\"\"\u521d\u59cb\u5316Security. \u4e00\u822c\u800c\u8a00\uff0comicron\u7684\u4f7f\u7528\u8005\u65e0\u987b\u8c03\u7528\u6b64\u65b9\u6cd5\uff0c\u5b83\u4f1a\u5728omicron\u521d\u59cb\u5316\uff08\u901a\u8fc7`omicron.init`\uff09\u65f6\uff0c\u88ab\u81ea\u52a8\u8c03\u7528\u3002 Raises: DataNotReadyError: \u5982\u679comicron\u672a\u521d\u59cb\u5316\uff0c\u6216\u8005cache\u4e2d\u672a\u52a0\u8f7d\u6700\u65b0\u8bc1\u5238\u5217\u8868\uff0c\u5219\u629b\u51fa\u6b64\u5f02\u5e38\u3002 \"\"\" # read all securities from redis, 7111 records now # {'index', 'stock'} # {'fjb', 'mmf', 'reits', 'fja', 'fjm'} # {'etf', 'lof'} if len ( cls . _securities ) > 100 : return True secs = await cls . load_securities () if secs is None or len ( secs ) == 0 : # pragma: no cover raise DataNotReadyError ( \"No securities in cache, make sure you have called omicron.init() first.\" ) print ( \"init securities done\" ) return True @classmethod async def load_securities ( cls ): \"\"\"\u52a0\u8f7d\u6240\u6709\u8bc1\u5238\u7684\u4fe1\u606f\uff0c\u5e76\u7f13\u5b58\u5230\u5185\u5b58\u4e2d \u4e00\u822c\u800c\u8a00\uff0comicron\u7684\u4f7f\u7528\u8005\u65e0\u987b\u8c03\u7528\u6b64\u65b9\u6cd5\uff0c\u5b83\u4f1a\u5728omicron\u521d\u59cb\u5316\uff08\u901a\u8fc7`omicron.init`\uff09\u65f6\uff0c\u88ab\u81ea\u52a8\u8c03\u7528\u3002 \"\"\" secs = await cache . security . lrange ( \"security:all\" , 0 , - 1 ) if len ( secs ) != 0 : # using np.datetime64[s] _securities = np . array ( [ tuple ( x . split ( \",\" )) for x in secs ], dtype = security_info_dtype ) # \u66f4\u65b0\u8bc1\u5238\u7c7b\u578b\u5217\u8868 cls . _securities = _securities cls . _security_types = set ( _securities [ \"type\" ]) cls . _stocks = _securities [ ( _securities [ \"type\" ] == \"stock\" ) | ( _securities [ \"type\" ] == \"index\" ) ] logger . info ( \" %d securities loaded, types: %s \" , len ( _securities ), cls . _security_types ) date_in_cache = await cache . security . get ( \"security:latest_date\" ) if date_in_cache is not None : cls . _securities_date = arrow . get ( date_in_cache ) . date () else : cls . _securities_date = datetime . date . 
today () return _securities else : # pragma: no cover return None @classmethod async def get_security_types ( cls ): if cls . _security_types : return list ( cls . _security_types ) else : return None @classmethod def get_stock ( cls , code ) -> NDArray [ security_info_dtype ]: \"\"\"\u6839\u636e`code`\u6765\u67e5\u627e\u5bf9\u5e94\u7684\u80a1\u7968\uff08\u542b\u6307\u6570\uff09\u5bf9\u8c61\u4fe1\u606f\u3002 \u5982\u679c\u60a8\u53ea\u6709\u80a1\u7968\u4ee3\u7801\uff0c\u60f3\u77e5\u9053\u8be5\u4ee3\u7801\u5bf9\u5e94\u7684\u80a1\u7968\u540d\u79f0\u3001\u522b\u540d\uff08\u663e\u793a\u540d\uff09\u3001\u4e0a\u5e02\u65e5\u671f\u7b49\u4fe1\u606f\uff0c\u5c31\u53ef\u4ee5\u4f7f\u7528\u6b64\u65b9\u6cd5\u6765\u83b7\u53d6\u76f8\u5173\u4fe1\u606f\u3002 \u8fd4\u56de\u7c7b\u578b\u4e3a`security_info_dtype`\u7684numpy\u6570\u7ec4\uff0c\u4f46\u4ec5\u5305\u542b\u4e00\u4e2a\u5143\u7d20\u3002\u60a8\u53ef\u4ee5\u8c61\u5b57\u5178\u4e00\u6837\u5b58\u53d6\u5b83\uff0c\u6bd4\u5982 ```python item = Security.get_stock(\"000001.XSHE\") print(item[\"alias\"]) ``` \u663e\u793a\u4e3a\"\u5e73\u5b89\u94f6\u884c\" Args: code: \u5f85\u67e5\u8be2\u7684\u80a1\u7968/\u6307\u6570\u4ee3\u7801 Returns: \u7c7b\u578b\u4e3a`security_info_dtype`\u7684numpy\u6570\u7ec4\uff0c\u4f46\u4ec5\u5305\u542b\u4e00\u4e2a\u5143\u7d20 \"\"\" if len ( cls . _securities ) == 0 : return None tmp = cls . _securities [ cls . _securities [ \"code\" ] == code ] if len ( tmp ) > 0 : if tmp [ \"type\" ] in [ \"stock\" , \"index\" ]: return tmp [ 0 ] return None @classmethod def fuzzy_match_ex ( cls , query : str ) -> Dict [ str , Tuple ]: # fixme: \u6b64\u65b9\u6cd5\u4e0eStock.fuzzy_match\u91cd\u590d\uff0c\u5e76\u4e14\u8fdb\u884c\u4e86\u7c7b\u578b\u9650\u5236\uff0c\u4f7f\u5f97\u5176\u4e0d\u9002\u5408\u653e\u5728Security\u91cc\uff0c\u4ee5\u53ca\u4f5c\u4e3a\u4e00\u4e2a\u901a\u7528\u65b9\u6cd5 query = query . upper () if re . match ( r \"\\d+\" , query ): return { sec [ \"code\" ]: sec . tolist () for sec in cls . _securities if sec [ \"code\" ] . find ( query ) != - 1 and sec [ \"type\" ] == \"stock\" } elif re . match ( r \"[A-Z]+\" , query ): return { sec [ \"code\" ]: sec . tolist () for sec in cls . _securities if sec [ \"name\" ] . startswith ( query ) and sec [ \"type\" ] == \"stock\" } else : return { sec [ \"code\" ]: sec . tolist () for sec in cls . _securities if sec [ \"alias\" ] . find ( query ) != - 1 and sec [ \"type\" ] == \"stock\" } @classmethod async def info ( cls , code , date = None ): _obj = await cls . query_security_via_date ( code , date ) if _obj is None : return None # \"_time\", \"code\", \"type\", \"alias\", \"end\", \"ipo\", \"name\" d1 = convert_nptime_to_datetime ( _obj [ \"ipo\" ]) . date () d2 = convert_nptime_to_datetime ( _obj [ \"end\" ]) . date () return { \"type\" : _obj [ \"type\" ], \"display_name\" : _obj [ \"alias\" ], \"alias\" : _obj [ \"alias\" ], \"end\" : d2 , \"start\" : d1 , \"name\" : _obj [ \"name\" ], } @classmethod async def name ( cls , code , date = None ): _security = await cls . query_security_via_date ( code , date ) if _security is None : return None return _security [ \"name\" ] @classmethod async def alias ( cls , code , date = None ): return await cls . display_name ( code , date ) @classmethod async def display_name ( cls , code , date = None ): _security = await cls . query_security_via_date ( code , date ) if _security is None : return None return _security [ \"alias\" ] @classmethod async def start_date ( cls , code , date = None ): _security = await cls . 
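A short usage sketch for the lookup helpers above. The get_stock call mirrors the example embedded in its docstring; info and display_name are coroutines, so they must run inside an async function. The omicron.init() call assumes the configuration already points at a running Omega/Redis instance:

```python
import asyncio
import omicron
from omicron.models.security import Security

async def demo():
    await omicron.init()                          # loads the cached security list

    item = Security.get_stock("000001.XSHE")      # one-element security_info_dtype record
    if item is not None:
        print(item["alias"])                      # "平安银行", as in the docstring above

    info = await Security.info("000001.XSHE")     # dict: type/display_name/alias/start/end/name
    print(info["display_name"], info["start"], info["end"])

asyncio.run(demo())
```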
query_security_via_date ( code , date ) if _security is None : return None return convert_nptime_to_datetime ( _security [ \"ipo\" ]) . date () @classmethod async def end_date ( cls , code , date = None ): _security = await cls . query_security_via_date ( code , date ) if _security is None : return None return convert_nptime_to_datetime ( _security [ \"end\" ]) . date () @classmethod async def security_type ( cls , code , date = None ) -> SecurityType : _security = await cls . query_security_via_date ( code , date ) if _security is None : return None return _security [ \"type\" ] @classmethod async def query_security_via_date ( cls , code : str , date : datetime . date = None ): if date is None : # \u4ece\u5185\u5b58\u4e2d\u67e5\u627e\uff0c\u5982\u679c\u7f13\u5b58\u4e2d\u7684\u6570\u636e\u5df2\u66f4\u65b0\uff0c\u91cd\u65b0\u52a0\u8f7d\u5230\u5185\u5b58 date_in_cache = await cache . security . get ( \"security:latest_date\" ) if date_in_cache is not None : date = arrow . get ( date_in_cache ) . date () if date > cls . _securities_date : await cls . load_securities () results = cls . _securities [ cls . _securities [ \"code\" ] == code ] else : # \u4eceinfluxdb\u67e5\u627e date = tf . day_shift ( date , 0 ) results = await cls . load_securities_from_db ( date , code ) if results is not None and len ( results ) > 0 : return results [ 0 ] else : return None @classmethod def select ( cls , date : datetime . date = None ) -> Query : if date is None : return Query ( target_date = None ) else : return Query ( target_date = date ) @classmethod async def update_secs_cache ( cls , dt : datetime . date , securities : List [ Tuple ]): \"\"\"\u66f4\u65b0\u8bc1\u5238\u5217\u8868\u5230\u7f13\u5b58\u6570\u636e\u5e93\u4e2d Args: dt: \u8bc1\u5238\u5217\u8868\u5f52\u5c5e\u7684\u65e5\u671f securities: \u8bc1\u5238\u5217\u8868, \u5143\u7d20\u4e3a\u5143\u7ec4\uff0c\u5206\u522b\u4e3a\u4ee3\u7801\u3001\u522b\u540d\u3001\u540d\u79f0\u3001IPO\u65e5\u671f\u3001\u9000\u5e02\u65e5\u548c\u8bc1\u5238\u7c7b\u578b \"\"\" # stock: {'index', 'stock'} # funds: {'fjb', 'mmf', 'reits', 'fja', 'fjm'} # {'etf', 'lof'} key = \"security:all\" pipeline = cache . security . pipeline () pipeline . delete ( key ) for code , alias , name , start , end , _type in securities : pipeline . rpush ( key , f \" { code } , { alias } , { name } , { start } ,\" f \" { end } , { _type } \" ) await pipeline . execute () logger . info ( \"all securities saved to cache %s , %d secs\" , key , len ( securities )) # update latest date info await cache . security . set ( \"security:latest_date\" , dt . strftime ( \"%Y-%m- %d \" )) @classmethod async def save_securities ( cls , securities : List [ str ], dt : datetime . date ): \"\"\"\u4fdd\u5b58\u6307\u5b9a\u7684\u8bc1\u5238\u4fe1\u606f\u5230\u7f13\u5b58\u4e2d\uff0c\u5e76\u4e14\u5b58\u5165influxdb\uff0c\u5b9a\u65f6job\u8c03\u7528\u672c\u63a5\u53e3 Args: securities: \u8bc1\u5238\u4ee3\u7801\u5217\u8868\u3002 \"\"\" # stock: {'index', 'stock'} # funds: {'fjb', 'mmf', 'reits', 'fja', 'fjm'} # {'etf', 'lof'} if dt is None or len ( securities ) == 0 : return measurement = \"security_list\" client = get_influx_client () # code, alias, name, start, end, type security_list = np . array ( [ ( dt , x [ 0 ], f \" { x [ 0 ] } , { x [ 1 ] } , { x [ 2 ] } , { x [ 3 ] } , { x [ 4 ] } , { x [ 5 ] } \" ) for x in securities ], dtype = security_db_dtype , ) await client . 
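The coroutines above (start_date, end_date, security_type) all delegate to query_security_via_date, which answers from the in-memory cache when date is None and from influxdb otherwise. A minimal sketch; the concrete date is illustrative only:

```python
import datetime
from omicron.models.security import Security

async def listing_window(code: str):
    ipo = await Security.start_date(code)            # IPO date from the cached security list
    delist = await Security.end_date(code)           # delisting (end) date
    sec_type = await Security.security_type(code)    # stored type value, e.g. "stock" or "index"

    # An explicit date routes the lookup to the security list persisted in influxdb.
    historic = await Security.query_security_via_date(code, datetime.date(2022, 1, 4))
    return ipo, delist, sec_type, historic
```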
save ( security_list , measurement , time_key = \"frame\" , tag_keys = [ \"code\" ] ) @classmethod async def load_securities_from_db ( cls , target_date : datetime . date , code : str = None ): if target_date is None : return None client = get_influx_client () measurement = \"security_list\" flux = ( Flux () . measurement ( measurement ) . range ( target_date , target_date ) . bucket ( client . _bucket ) . fields ([ \"info\" ]) ) if code is not None and len ( code ) > 0 : flux . tags ({ \"code\" : code }) data = await client . query ( flux ) if len ( data ) == 2 : # \\r\\n return None ds = DataframeDeserializer ( sort_values = \"_time\" , usecols = [ \"_time\" , \"code\" , \"info\" ], time_col = \"_time\" , engine = \"c\" , ) actual = ds ( data ) secs = actual . to_records ( index = False ) if len ( secs ) != 0 : # \"_time\", \"code\", \"code, alias, name, start, end, type\" _securities = np . array ( [ tuple ( x [ \"info\" ] . split ( \",\" )) for x in secs ], dtype = security_info_dtype ) return _securities else : return None @classmethod async def get_datescope_from_db ( cls ): # fixme: \u51fd\u6570\u540d\u65e0\u6cd5\u53cd\u6620\u7528\u9014\uff0c\u9700\u8981\u589e\u52a0\u6587\u6863\u6ce8\u91ca\uff0c\u8bf4\u660e\u8be5\u51fd\u6570\u7684\u4f5c\u7528,\u6216\u8005\u4e0d\u5e94\u8be5\u51fa\u73b0\u5728\u6b64\u7c7b\u4e2d\uff1f client = get_influx_client () measurement = \"security_list\" date1 = arrow . get ( \"2005-01-01\" ) . date () date2 = arrow . now () . naive . date () flux = ( Flux () . measurement ( measurement ) . range ( date1 , date2 ) . bucket ( client . _bucket ) . tags ({ \"code\" : \"000001.XSHE\" }) ) data = await client . query ( flux ) if len ( data ) == 2 : # \\r\\n return None , None ds = DataframeDeserializer ( sort_values = \"_time\" , usecols = [ \"_time\" ], time_col = \"_time\" , engine = \"c\" ) actual = ds ( data ) secs = actual . to_records ( index = False ) if len ( secs ) != 0 : d1 = convert_nptime_to_datetime ( secs [ 0 ][ \"_time\" ]) d2 = convert_nptime_to_datetime ( secs [ len ( secs ) - 1 ][ \"_time\" ]) return d1 . date (), d2 . date () else : return None , None @classmethod async def _notify_special_bonusnote ( cls , code , note , cancel_date ): # fixme: \u8fd9\u4e2a\u51fd\u6570\u5e94\u8be5\u51fa\u73b0\u5728omega\u4e2d\uff1f default_cancel_date = datetime . date ( 2099 , 1 , 1 ) # \u9ed8\u8ba4\u65e0\u53d6\u6d88\u516c\u544a # report this special event to notify user if cancel_date != default_cancel_date : ding ( \"security %s , bonus_cancel_pub_date %s \" % ( code , cancel_date )) if note . find ( \"\u6d41\u901a\" ) != - 1 : # \u68c0\u67e5\u662f\u5426\u6709\u201c\u6d41\u901a\u80a1\u201d\u6587\u5b57 ding ( \"security %s , special xrxd note: %s \" % ( code , note )) @classmethod async def save_xrxd_reports ( cls , reports : List [ str ], dt : datetime . date ): # fixme: \u6b64\u51fd\u6570\u5e94\u8be5\u5c5e\u4e8eomega? \"\"\"\u4fdd\u5b581\u5e74\u5185\u7684\u5206\u7ea2\u9001\u80a1\u4fe1\u606f\uff0c\u5e76\u4e14\u5b58\u5165influxdb\uff0c\u5b9a\u65f6job\u8c03\u7528\u672c\u63a5\u53e3 Args: reports: \u5206\u7ea2\u9001\u80a1\u516c\u544a \"\"\" # code(0), a_xr_date, board_plan_bonusnote, bonus_ratio_rmb(3), dividend_ratio, transfer_ratio(5), # at_bonus_ratio_rmb(6), report_date, plan_progress, implementation_bonusnote, bonus_cancel_pub_date(10) if len ( reports ) == 0 or dt is None : return # read reports from db and convert to dict map reports_in_db = {} dt_start = dt - datetime . timedelta ( days = 366 ) # \u5f80\u524d\u56de\u6eaf366\u5929 dt_end = dt + datetime . 
timedelta ( days = 366 ) # \u5f80\u540e\u5ef6\u957f366\u5929 existing_records = await cls . _load_xrxd_from_db ( None , dt_start , dt_end ) for record in existing_records : code = record [ 0 ] if code not in reports_in_db : reports_in_db [ code ] = [ record ] else : reports_in_db [ code ] . append ( record ) records = [] # \u51c6\u5907\u5199\u5165db for x in reports : code = x [ 0 ] note = x [ 2 ] cancel_date = x [ 10 ] existing_items = reports_in_db . get ( code , None ) if existing_items is None : # \u65b0\u8bb0\u5f55 record = ( x [ 1 ], x [ 0 ], f \" { x [ 0 ] } | { x [ 1 ] } | { x [ 2 ] } | { x [ 3 ] } | { x [ 4 ] } | { x [ 5 ] } | { x [ 6 ] } | { x [ 7 ] } | { x [ 8 ] } | { x [ 9 ] } | { x [ 10 ] } \" , ) records . append ( record ) await cls . _notify_special_bonusnote ( code , note , cancel_date ) else : new_record = True for item in existing_items : existing_date = convert_nptime_to_datetime ( item [ 1 ]) . date () if existing_date == x [ 1 ]: # \u5982\u679cxr_date\u76f8\u540c\uff0c\u4e0d\u66f4\u65b0 new_record = False continue if new_record : record = ( x [ 1 ], x [ 0 ], f \" { x [ 0 ] } | { x [ 1 ] } | { x [ 2 ] } | { x [ 3 ] } | { x [ 4 ] } | { x [ 5 ] } | { x [ 6 ] } | { x [ 7 ] } | { x [ 8 ] } | { x [ 9 ] } | { x [ 10 ] } \" , ) records . append ( record ) await cls . _notify_special_bonusnote ( code , note , cancel_date ) logger . info ( \"save_xrxd_reports, %d records to be saved\" , len ( records )) if len ( records ) == 0 : return measurement = \"security_xrxd_reports\" client = get_influx_client () # a_xr_date(_time), code(tag), info report_list = np . array ( records , dtype = security_db_dtype ) await client . save ( report_list , measurement , time_key = \"frame\" , tag_keys = [ \"code\" ]) @classmethod async def _load_xrxd_from_db ( cls , code , dt_start : datetime . date , dt_end : datetime . date ): if dt_start is None or dt_end is None : return [] client = get_influx_client () measurement = \"security_xrxd_reports\" flux = ( Flux () . measurement ( measurement ) . range ( dt_start , dt_end ) . bucket ( client . _bucket ) . fields ([ \"info\" ]) ) if code is not None and len ( code ) > 0 : flux . tags ({ \"code\" : code }) data = await client . query ( flux ) if len ( data ) == 2 : # \\r\\n return [] ds = DataframeDeserializer ( sort_values = \"_time\" , usecols = [ \"_time\" , \"code\" , \"info\" ], time_col = \"_time\" , engine = \"c\" , ) actual = ds ( data ) secs = actual . to_records ( index = False ) if len ( secs ) != 0 : _reports = np . array ( [ tuple ( x [ \"info\" ] . split ( \"|\" )) for x in secs ], dtype = xrxd_info_dtype ) return _reports else : return [] @classmethod async def get_xrxd_info ( cls , dt : datetime . date , code : str = None ): if dt is None : return None # code(0), a_xr_date, board_plan_bonusnote, bonus_ratio_rmb(3), dividend_ratio, transfer_ratio(5), # at_bonus_ratio_rmb(6), report_date, plan_progress, implementation_bonusnote, bonus_cancel_pub_date(10) reports = await cls . _load_xrxd_from_db ( code , dt , dt ) if len ( reports ) == 0 : return None readable_reports = [] for report in reports : xr_date = convert_nptime_to_datetime ( report [ 1 ]) . date () readable_reports . 
append ( { \"code\" : report [ 0 ], \"xr_date\" : xr_date , \"bonus\" : report [ 3 ], \"dividend\" : report [ 4 ], \"transfer\" : report [ 5 ], \"bonusnote\" : report [ 2 ], } ) return readable_reports","title":"Security"},{"location":"api/security/#omicron.models.security.Security.get_stock","text":"\u6839\u636e code \u6765\u67e5\u627e\u5bf9\u5e94\u7684\u80a1\u7968\uff08\u542b\u6307\u6570\uff09\u5bf9\u8c61\u4fe1\u606f\u3002 \u5982\u679c\u60a8\u53ea\u6709\u80a1\u7968\u4ee3\u7801\uff0c\u60f3\u77e5\u9053\u8be5\u4ee3\u7801\u5bf9\u5e94\u7684\u80a1\u7968\u540d\u79f0\u3001\u522b\u540d\uff08\u663e\u793a\u540d\uff09\u3001\u4e0a\u5e02\u65e5\u671f\u7b49\u4fe1\u606f\uff0c\u5c31\u53ef\u4ee5\u4f7f\u7528\u6b64\u65b9\u6cd5\u6765\u83b7\u53d6\u76f8\u5173\u4fe1\u606f\u3002 \u8fd4\u56de\u7c7b\u578b\u4e3a security_info_dtype \u7684numpy\u6570\u7ec4\uff0c\u4f46\u4ec5\u5305\u542b\u4e00\u4e2a\u5143\u7d20\u3002\u60a8\u53ef\u4ee5\u8c61\u5b57\u5178\u4e00\u6837\u5b58\u53d6\u5b83\uff0c\u6bd4\u5982 1 2 item = Security . get_stock ( \"000001.XSHE\" ) print ( item [ \"alias\" ]) \u663e\u793a\u4e3a\"\u5e73\u5b89\u94f6\u884c\" Parameters: Name Type Description Default code \u5f85\u67e5\u8be2\u7684\u80a1\u7968/\u6307\u6570\u4ee3\u7801 required Returns: Type Description numpy.ndarray[Any, numpy.dtype[[('code', 'O'), ('alias', 'O'), ('name', 'O'), ('ipo', 'datetime64[s]'), ('end', 'datetime64[s]'), ('type', 'O')]]] \u7c7b\u578b\u4e3a security_info_dtype \u7684numpy\u6570\u7ec4\uff0c\u4f46\u4ec5\u5305\u542b\u4e00\u4e2a\u5143\u7d20 Source code in omicron/models/security.py @classmethod def get_stock ( cls , code ) -> NDArray [ security_info_dtype ]: \"\"\"\u6839\u636e`code`\u6765\u67e5\u627e\u5bf9\u5e94\u7684\u80a1\u7968\uff08\u542b\u6307\u6570\uff09\u5bf9\u8c61\u4fe1\u606f\u3002 \u5982\u679c\u60a8\u53ea\u6709\u80a1\u7968\u4ee3\u7801\uff0c\u60f3\u77e5\u9053\u8be5\u4ee3\u7801\u5bf9\u5e94\u7684\u80a1\u7968\u540d\u79f0\u3001\u522b\u540d\uff08\u663e\u793a\u540d\uff09\u3001\u4e0a\u5e02\u65e5\u671f\u7b49\u4fe1\u606f\uff0c\u5c31\u53ef\u4ee5\u4f7f\u7528\u6b64\u65b9\u6cd5\u6765\u83b7\u53d6\u76f8\u5173\u4fe1\u606f\u3002 \u8fd4\u56de\u7c7b\u578b\u4e3a`security_info_dtype`\u7684numpy\u6570\u7ec4\uff0c\u4f46\u4ec5\u5305\u542b\u4e00\u4e2a\u5143\u7d20\u3002\u60a8\u53ef\u4ee5\u8c61\u5b57\u5178\u4e00\u6837\u5b58\u53d6\u5b83\uff0c\u6bd4\u5982 ```python item = Security.get_stock(\"000001.XSHE\") print(item[\"alias\"]) ``` \u663e\u793a\u4e3a\"\u5e73\u5b89\u94f6\u884c\" Args: code: \u5f85\u67e5\u8be2\u7684\u80a1\u7968/\u6307\u6570\u4ee3\u7801 Returns: \u7c7b\u578b\u4e3a`security_info_dtype`\u7684numpy\u6570\u7ec4\uff0c\u4f46\u4ec5\u5305\u542b\u4e00\u4e2a\u5143\u7d20 \"\"\" if len ( cls . _securities ) == 0 : return None tmp = cls . _securities [ cls . _securities [ \"code\" ] == code ] if len ( tmp ) > 0 : if tmp [ \"type\" ] in [ \"stock\" , \"index\" ]: return tmp [ 0 ] return None","title":"get_stock()"},{"location":"api/security/#omicron.models.security.Security.init","text":"\u521d\u59cb\u5316Security. 
\u4e00\u822c\u800c\u8a00\uff0comicron\u7684\u4f7f\u7528\u8005\u65e0\u987b\u8c03\u7528\u6b64\u65b9\u6cd5\uff0c\u5b83\u4f1a\u5728omicron\u521d\u59cb\u5316\uff08\u901a\u8fc7 omicron.init \uff09\u65f6\uff0c\u88ab\u81ea\u52a8\u8c03\u7528\u3002 Exceptions: Type Description DataNotReadyError \u5982\u679comicron\u672a\u521d\u59cb\u5316\uff0c\u6216\u8005cache\u4e2d\u672a\u52a0\u8f7d\u6700\u65b0\u8bc1\u5238\u5217\u8868\uff0c\u5219\u629b\u51fa\u6b64\u5f02\u5e38\u3002 Source code in omicron/models/security.py @classmethod async def init ( cls ): \"\"\"\u521d\u59cb\u5316Security. \u4e00\u822c\u800c\u8a00\uff0comicron\u7684\u4f7f\u7528\u8005\u65e0\u987b\u8c03\u7528\u6b64\u65b9\u6cd5\uff0c\u5b83\u4f1a\u5728omicron\u521d\u59cb\u5316\uff08\u901a\u8fc7`omicron.init`\uff09\u65f6\uff0c\u88ab\u81ea\u52a8\u8c03\u7528\u3002 Raises: DataNotReadyError: \u5982\u679comicron\u672a\u521d\u59cb\u5316\uff0c\u6216\u8005cache\u4e2d\u672a\u52a0\u8f7d\u6700\u65b0\u8bc1\u5238\u5217\u8868\uff0c\u5219\u629b\u51fa\u6b64\u5f02\u5e38\u3002 \"\"\" # read all securities from redis, 7111 records now # {'index', 'stock'} # {'fjb', 'mmf', 'reits', 'fja', 'fjm'} # {'etf', 'lof'} if len ( cls . _securities ) > 100 : return True secs = await cls . load_securities () if secs is None or len ( secs ) == 0 : # pragma: no cover raise DataNotReadyError ( \"No securities in cache, make sure you have called omicron.init() first.\" ) print ( \"init securities done\" ) return True","title":"init()"},{"location":"api/security/#omicron.models.security.Security.load_securities","text":"\u52a0\u8f7d\u6240\u6709\u8bc1\u5238\u7684\u4fe1\u606f\uff0c\u5e76\u7f13\u5b58\u5230\u5185\u5b58\u4e2d \u4e00\u822c\u800c\u8a00\uff0comicron\u7684\u4f7f\u7528\u8005\u65e0\u987b\u8c03\u7528\u6b64\u65b9\u6cd5\uff0c\u5b83\u4f1a\u5728omicron\u521d\u59cb\u5316\uff08\u901a\u8fc7 omicron.init \uff09\u65f6\uff0c\u88ab\u81ea\u52a8\u8c03\u7528\u3002 Source code in omicron/models/security.py @classmethod async def load_securities ( cls ): \"\"\"\u52a0\u8f7d\u6240\u6709\u8bc1\u5238\u7684\u4fe1\u606f\uff0c\u5e76\u7f13\u5b58\u5230\u5185\u5b58\u4e2d \u4e00\u822c\u800c\u8a00\uff0comicron\u7684\u4f7f\u7528\u8005\u65e0\u987b\u8c03\u7528\u6b64\u65b9\u6cd5\uff0c\u5b83\u4f1a\u5728omicron\u521d\u59cb\u5316\uff08\u901a\u8fc7`omicron.init`\uff09\u65f6\uff0c\u88ab\u81ea\u52a8\u8c03\u7528\u3002 \"\"\" secs = await cache . security . lrange ( \"security:all\" , 0 , - 1 ) if len ( secs ) != 0 : # using np.datetime64[s] _securities = np . array ( [ tuple ( x . split ( \",\" )) for x in secs ], dtype = security_info_dtype ) # \u66f4\u65b0\u8bc1\u5238\u7c7b\u578b\u5217\u8868 cls . _securities = _securities cls . _security_types = set ( _securities [ \"type\" ]) cls . _stocks = _securities [ ( _securities [ \"type\" ] == \"stock\" ) | ( _securities [ \"type\" ] == \"index\" ) ] logger . info ( \" %d securities loaded, types: %s \" , len ( _securities ), cls . _security_types ) date_in_cache = await cache . security . get ( \"security:latest_date\" ) if date_in_cache is not None : cls . _securities_date = arrow . get ( date_in_cache ) . date () else : cls . _securities_date = datetime . date . 
today () return _securities else : # pragma: no cover return None","title":"load_securities()"},{"location":"api/security/#omicron.models.security.Security.save_securities","text":"\u4fdd\u5b58\u6307\u5b9a\u7684\u8bc1\u5238\u4fe1\u606f\u5230\u7f13\u5b58\u4e2d\uff0c\u5e76\u4e14\u5b58\u5165influxdb\uff0c\u5b9a\u65f6job\u8c03\u7528\u672c\u63a5\u53e3 Parameters: Name Type Description Default securities List[str] \u8bc1\u5238\u4ee3\u7801\u5217\u8868\u3002 required Source code in omicron/models/security.py @classmethod async def save_securities ( cls , securities : List [ str ], dt : datetime . date ): \"\"\"\u4fdd\u5b58\u6307\u5b9a\u7684\u8bc1\u5238\u4fe1\u606f\u5230\u7f13\u5b58\u4e2d\uff0c\u5e76\u4e14\u5b58\u5165influxdb\uff0c\u5b9a\u65f6job\u8c03\u7528\u672c\u63a5\u53e3 Args: securities: \u8bc1\u5238\u4ee3\u7801\u5217\u8868\u3002 \"\"\" # stock: {'index', 'stock'} # funds: {'fjb', 'mmf', 'reits', 'fja', 'fjm'} # {'etf', 'lof'} if dt is None or len ( securities ) == 0 : return measurement = \"security_list\" client = get_influx_client () # code, alias, name, start, end, type security_list = np . array ( [ ( dt , x [ 0 ], f \" { x [ 0 ] } , { x [ 1 ] } , { x [ 2 ] } , { x [ 3 ] } , { x [ 4 ] } , { x [ 5 ] } \" ) for x in securities ], dtype = security_db_dtype , ) await client . save ( security_list , measurement , time_key = \"frame\" , tag_keys = [ \"code\" ] )","title":"save_securities()"},{"location":"api/security/#omicron.models.security.Security.save_xrxd_reports","text":"\u4fdd\u5b581\u5e74\u5185\u7684\u5206\u7ea2\u9001\u80a1\u4fe1\u606f\uff0c\u5e76\u4e14\u5b58\u5165influxdb\uff0c\u5b9a\u65f6job\u8c03\u7528\u672c\u63a5\u53e3 Parameters: Name Type Description Default reports List[str] \u5206\u7ea2\u9001\u80a1\u516c\u544a required Source code in omicron/models/security.py @classmethod async def save_xrxd_reports ( cls , reports : List [ str ], dt : datetime . date ): # fixme: \u6b64\u51fd\u6570\u5e94\u8be5\u5c5e\u4e8eomega? \"\"\"\u4fdd\u5b581\u5e74\u5185\u7684\u5206\u7ea2\u9001\u80a1\u4fe1\u606f\uff0c\u5e76\u4e14\u5b58\u5165influxdb\uff0c\u5b9a\u65f6job\u8c03\u7528\u672c\u63a5\u53e3 Args: reports: \u5206\u7ea2\u9001\u80a1\u516c\u544a \"\"\" # code(0), a_xr_date, board_plan_bonusnote, bonus_ratio_rmb(3), dividend_ratio, transfer_ratio(5), # at_bonus_ratio_rmb(6), report_date, plan_progress, implementation_bonusnote, bonus_cancel_pub_date(10) if len ( reports ) == 0 or dt is None : return # read reports from db and convert to dict map reports_in_db = {} dt_start = dt - datetime . timedelta ( days = 366 ) # \u5f80\u524d\u56de\u6eaf366\u5929 dt_end = dt + datetime . timedelta ( days = 366 ) # \u5f80\u540e\u5ef6\u957f366\u5929 existing_records = await cls . _load_xrxd_from_db ( None , dt_start , dt_end ) for record in existing_records : code = record [ 0 ] if code not in reports_in_db : reports_in_db [ code ] = [ record ] else : reports_in_db [ code ] . append ( record ) records = [] # \u51c6\u5907\u5199\u5165db for x in reports : code = x [ 0 ] note = x [ 2 ] cancel_date = x [ 10 ] existing_items = reports_in_db . get ( code , None ) if existing_items is None : # \u65b0\u8bb0\u5f55 record = ( x [ 1 ], x [ 0 ], f \" { x [ 0 ] } | { x [ 1 ] } | { x [ 2 ] } | { x [ 3 ] } | { x [ 4 ] } | { x [ 5 ] } | { x [ 6 ] } | { x [ 7 ] } | { x [ 8 ] } | { x [ 9 ] } | { x [ 10 ] } \" , ) records . append ( record ) await cls . _notify_special_bonusnote ( code , note , cancel_date ) else : new_record = True for item in existing_items : existing_date = convert_nptime_to_datetime ( item [ 1 ]) . 
date () if existing_date == x [ 1 ]: # \u5982\u679cxr_date\u76f8\u540c\uff0c\u4e0d\u66f4\u65b0 new_record = False continue if new_record : record = ( x [ 1 ], x [ 0 ], f \" { x [ 0 ] } | { x [ 1 ] } | { x [ 2 ] } | { x [ 3 ] } | { x [ 4 ] } | { x [ 5 ] } | { x [ 6 ] } | { x [ 7 ] } | { x [ 8 ] } | { x [ 9 ] } | { x [ 10 ] } \" , ) records . append ( record ) await cls . _notify_special_bonusnote ( code , note , cancel_date ) logger . info ( \"save_xrxd_reports, %d records to be saved\" , len ( records )) if len ( records ) == 0 : return measurement = \"security_xrxd_reports\" client = get_influx_client () # a_xr_date(_time), code(tag), info report_list = np . array ( records , dtype = security_db_dtype ) await client . save ( report_list , measurement , time_key = \"frame\" , tag_keys = [ \"code\" ])","title":"save_xrxd_reports()"},{"location":"api/security/#omicron.models.security.Security.update_secs_cache","text":"\u66f4\u65b0\u8bc1\u5238\u5217\u8868\u5230\u7f13\u5b58\u6570\u636e\u5e93\u4e2d Parameters: Name Type Description Default dt date \u8bc1\u5238\u5217\u8868\u5f52\u5c5e\u7684\u65e5\u671f required securities List[Tuple] \u8bc1\u5238\u5217\u8868, \u5143\u7d20\u4e3a\u5143\u7ec4\uff0c\u5206\u522b\u4e3a\u4ee3\u7801\u3001\u522b\u540d\u3001\u540d\u79f0\u3001IPO\u65e5\u671f\u3001\u9000\u5e02\u65e5\u548c\u8bc1\u5238\u7c7b\u578b required Source code in omicron/models/security.py @classmethod async def update_secs_cache ( cls , dt : datetime . date , securities : List [ Tuple ]): \"\"\"\u66f4\u65b0\u8bc1\u5238\u5217\u8868\u5230\u7f13\u5b58\u6570\u636e\u5e93\u4e2d Args: dt: \u8bc1\u5238\u5217\u8868\u5f52\u5c5e\u7684\u65e5\u671f securities: \u8bc1\u5238\u5217\u8868, \u5143\u7d20\u4e3a\u5143\u7ec4\uff0c\u5206\u522b\u4e3a\u4ee3\u7801\u3001\u522b\u540d\u3001\u540d\u79f0\u3001IPO\u65e5\u671f\u3001\u9000\u5e02\u65e5\u548c\u8bc1\u5238\u7c7b\u578b \"\"\" # stock: {'index', 'stock'} # funds: {'fjb', 'mmf', 'reits', 'fja', 'fjm'} # {'etf', 'lof'} key = \"security:all\" pipeline = cache . security . pipeline () pipeline . delete ( key ) for code , alias , name , start , end , _type in securities : pipeline . rpush ( key , f \" { code } , { alias } , { name } , { start } ,\" f \" { end } , { _type } \" ) await pipeline . execute () logger . info ( \"all securities saved to cache %s , %d secs\" , key , len ( securities )) # update latest date info await cache . security . set ( \"security:latest_date\" , dt . strftime ( \"%Y-%m- %d \" ))","title":"update_secs_cache()"},{"location":"api/stock/","text":"Stock ( Security ) \u00b6 Stock\u5bf9\u8c61\u7528\u4e8e\u5f52\u96c6\u67d0\u652f\u8bc1\u5238\uff08\u80a1\u7968\u548c\u6307\u6570\uff0c\u4e0d\u5305\u62ec\u5176\u5b83\u6295\u8d44\u54c1\u79cd\uff09\u7684\u76f8\u5173\u4fe1\u606f\uff0c\u6bd4\u5982\u884c\u60c5\u6570\u636e\uff08OHLC\u7b49\uff09\u3001\u5e02\u503c\u6570\u636e\u3001\u6240\u5c5e\u6982\u5ff5\u5206\u7c7b\u7b49\u3002 Source code in omicron/models/stock.py class Stock ( Security ): \"\"\" Stock\u5bf9\u8c61\u7528\u4e8e\u5f52\u96c6\u67d0\u652f\u8bc1\u5238\uff08\u80a1\u7968\u548c\u6307\u6570\uff0c\u4e0d\u5305\u62ec\u5176\u5b83\u6295\u8d44\u54c1\u79cd\uff09\u7684\u76f8\u5173\u4fe1\u606f\uff0c\u6bd4\u5982\u884c\u60c5\u6570\u636e\uff08OHLC\u7b49\uff09\u3001\u5e02\u503c\u6570\u636e\u3001\u6240\u5c5e\u6982\u5ff5\u5206\u7c7b\u7b49\u3002 \"\"\" _is_cache_empty = True def __init__ ( self , code : str ): self . _code = code self . _stock = self . get_stock ( code ) assert self . _stock , \"\u7cfb\u7edf\u4e2d\u4e0d\u5b58\u5728\u8be5code\" ( _ , self . 
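save_securities and update_secs_cache are normally driven by Omega's scheduled jobs rather than called by end users; the sketch below only illustrates the tuple layout both expect, i.e. (code, alias, name, IPO date, end date, type) as stated in the docstring. All values are placeholders:

```python
import datetime
from omicron.models.security import Security

async def refresh_cache():
    secs = [
        # code, alias (display name), name, IPO date, end date, type
        ("000001.XSHE", "平安银行", "PAYH", "1991-04-03", "2200-01-01", "stock"),
    ]
    await Security.update_secs_cache(datetime.date(2023, 10, 30), secs)
```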
_display_name , self . _name , ipo , end , _type ) = self . _stock self . _start_date = convert_nptime_to_datetime ( ipo ) . date () self . _end_date = convert_nptime_to_datetime ( end ) . date () self . _type = SecurityType ( _type ) @classmethod def choose_listed ( cls , dt : datetime . date , types : List [ str ] = [ \"stock\" , \"index\" ]): cond = np . array ([ False ] * len ( cls . _stocks )) dt = datetime . datetime . combine ( dt , datetime . time ()) for type_ in types : cond |= cls . _stocks [ \"type\" ] == type_ result = cls . _stocks [ cond ] result = result [ result [ \"end\" ] > dt ] result = result [ result [ \"ipo\" ] <= dt ] # result = np.array(result, dtype=cls.stock_info_dtype) return result [ \"code\" ] . tolist () @classmethod def fuzzy_match ( cls , query : str ) -> Dict [ str , Tuple ]: \"\"\"\u5bf9\u80a1\u7968/\u6307\u6570\u8fdb\u884c\u6a21\u7cca\u5339\u914d\u67e5\u627e query\u53ef\u4ee5\u662f\u80a1\u7968/\u6307\u6570\u4ee3\u7801\uff0c\u4e5f\u53ef\u4ee5\u662f\u5b57\u6bcd\uff08\u6309name\u67e5\u627e\uff09\uff0c\u4e5f\u53ef\u4ee5\u662f\u6c49\u5b57\uff08\u6309\u663e\u793a\u540d\u67e5\u627e\uff09 Args: query (str): \u67e5\u8be2\u5b57\u7b26\u4e32 Returns: Dict[str, Tuple]: \u67e5\u8be2\u7ed3\u679c\uff0c\u5176\u4e2dTuple\u4e3a(code, display_name, name, start, end, type) \"\"\" query = query . upper () if re . match ( r \"\\d+\" , query ): return { sec [ \"code\" ]: sec . tolist () for sec in cls . _stocks if sec [ \"code\" ] . startswith ( query ) } elif re . match ( r \"[A-Z]+\" , query ): return { sec [ \"code\" ]: sec . tolist () for sec in cls . _stocks if sec [ \"name\" ] . startswith ( query ) } else : return { sec [ \"code\" ]: sec . tolist () for sec in cls . _stocks if sec [ \"alias\" ] . find ( query ) != - 1 } def __str__ ( self ): return f \" { self . display_name } [ { self . code } ]\" @property def ipo_date ( self ) -> datetime . date : return self . _start_date @property def display_name ( self ) -> str : return self . _display_name @property def name ( self ) -> str : return self . _name @property def end_date ( self ) -> datetime . date : return self . _end_date @property def code ( self ) -> str : return self . _code @property def sim_code ( self ) -> str : return re . sub ( r \"\\.XSH[EG]\" , \"\" , self . code ) @property def security_type ( self ) -> SecurityType : \"\"\"\u8fd4\u56de\u8bc1\u5238\u7c7b\u578b Returns: SecurityType: [description] \"\"\" return self . _type @staticmethod def simplify_code ( code ) -> str : return re . 
sub ( r \"\\.XSH[EG]\" , \"\" , code ) @staticmethod def format_code ( code ) -> str : \"\"\"\u65b0\u4e09\u677f\u548c\u5317\u4ea4\u6240\u7684\u80a1\u7968, \u6682\u4e0d\u652f\u6301, \u9ed8\u8ba4\u8fd4\u56deNone \u4e0a\u8bc1A\u80a1: 600\u3001601\u3001603\u3001605 \u6df1\u8bc1A\u80a1: 000\u3001001 \u4e2d\u5c0f\u677f: 002\u3001003 \u521b\u4e1a\u677f: 300/301 \u79d1\u521b\u677f: 688 \u65b0\u4e09\u677f: 82\u300183\u300187\u300188\u3001430\u3001420\u3001400 \u5317\u4ea4\u6240: 43\u300183\u300187\u300188 \"\"\" if not code or len ( code ) != 6 : return None prefix = code [ 0 ] if prefix in ( \"0\" , \"3\" ): return f \" { code } .XSHE\" elif prefix == \"6\" : return f \" { code } .XSHG\" else : return None def days_since_ipo ( self ) -> int : \"\"\"\u83b7\u53d6\u4e0a\u5e02\u4ee5\u6765\u7ecf\u8fc7\u4e86\u591a\u5c11\u4e2a\u4ea4\u6613\u65e5 \u7531\u4e8e\u53d7\u4ea4\u6613\u65e5\u5386\u9650\u5236\uff082005\u5e741\u67084\u65e5\u4e4b\u524d\u7684\u4ea4\u6613\u65e5\u5386\u6ca1\u6709\uff09\uff0c\u5bf9\u4e8e\u5728\u4e4b\u524d\u4e0a\u5e02\u7684\u54c1\u79cd\uff0c\u90fd\u8fd4\u56de\u4ece2005\u5e741\u67084\u65e5\u8d77\u7684\u65e5\u671f\u3002 Returns: int: [description] \"\"\" epoch_start = arrow . get ( \"2005-01-04\" ) . date () ipo_day = self . ipo_date if self . ipo_date > epoch_start else epoch_start return tf . count_day_frames ( ipo_day , arrow . now () . date ()) @staticmethod def qfq ( bars : BarsArray ) -> BarsArray : \"\"\"\u5bf9\u884c\u60c5\u6570\u636e\u6267\u884c\u524d\u590d\u6743\u64cd\u4f5c\"\"\" # todo: \u8fd9\u91cc\u53ef\u4ee5\u4f18\u5316 if bars . size == 0 : return bars last = bars [ - 1 ][ \"factor\" ] for field in [ \"open\" , \"high\" , \"low\" , \"close\" , \"volume\" ]: bars [ field ] = bars [ field ] * ( bars [ \"factor\" ] / last ) return bars @classmethod async def batch_get_min_level_bars_in_range ( cls , codes : List [ str ], frame_type : FrameType , start : Frame , end : Frame , fq : bool = True , ) -> Generator [ Dict [ str , BarsArray ], None , None ]: \"\"\"\u83b7\u53d6\u591a\u652f\u80a1\u7968\uff08\u6307\u6570\uff09\u5728[start, end)\u65f6\u95f4\u6bb5\u5185\u7684\u884c\u60c5\u6570\u636e \u5982\u679c\u8981\u83b7\u53d6\u7684\u884c\u60c5\u6570\u636e\u662f\u5206\u949f\u7ea7\u522b\uff08\u53731m, 5m, 15m, 30m\u548c60m)\uff0c\u4f7f\u7528\u672c\u63a5\u53e3\u3002 \u505c\u724c\u6570\u636e\u5904\u7406\u8bf7\u89c1[get_bars][omicron.models.stock.Stock.get_bars]\u3002 \u672c\u51fd\u6570\u8fd4\u56de\u4e00\u4e2a\u8fed\u4ee3\u5668\uff0c\u4f7f\u7528\u65b9\u6cd5\u793a\u4f8b\uff1a ``` async for code, bars in Stock.batch_get_min_level_bars_in_range(...): print(code, bars) ``` \u5982\u679c`end`\u4e0d\u5728`frame_type`\u6240\u5c5e\u7684\u8fb9\u754c\u70b9\u4e0a\uff0c\u90a3\u4e48\uff0c\u5982\u679c`end`\u5927\u4e8e\u7b49\u4e8e\u5f53\u524d\u7f13\u5b58\u672a\u6536\u76d8\u6570\u636e\u65f6\u95f4\uff0c\u5219\u5c06\u5305\u542b\u672a\u6536\u76d8\u6570\u636e\uff1b\u5426\u5219\uff0c\u8fd4\u56de\u7684\u8bb0\u5f55\u5c06\u622a\u6b62\u5230`tf.floor(end, frame_type)`\u3002 Args: codes: \u80a1\u7968/\u6307\u6570\u4ee3\u7801\u5217\u8868 frame_type: \u5e27\u7c7b\u578b start: \u8d77\u59cb\u65f6\u95f4 end: \u7ed3\u675f\u65f6\u95f4\u3002\u5982\u679c\u672a\u6307\u660e\uff0c\u5219\u53d6\u5f53\u524d\u65f6\u95f4\u3002 fq: \u662f\u5426\u8fdb\u884c\u590d\u6743\uff0c\u5982\u679c\u662f\uff0c\u5219\u8fdb\u884c\u524d\u590d\u6743\u3002Defaults to True. 
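The Stock constructor and the static helpers above can be exercised directly once the security list is cached (the constructor asserts the code exists). The return values in the comments follow the prefix rules spelled out in format_code's docstring:

```python
from omicron.models.stock import Stock

Stock.format_code("600000")           # "600000.XSHG"  (6xx -> Shanghai)
Stock.format_code("300750")           # "300750.XSHE"  (0xx/3xx -> Shenzhen)
Stock.format_code("830799")           # None: NEEQ/BSE codes are not supported
Stock.simplify_code("000001.XSHE")    # "000001"

stock = Stock("000001.XSHE")          # requires omicron.init() to have loaded the list
print(stock.display_name, stock.ipo_date, stock.days_since_ipo())

# Fuzzy lookup by code prefix, pinyin-style name, or display name
matches = Stock.fuzzy_match("平安")
```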
Returns: Generator[Dict[str, BarsArray], None, None]: \u8fed\u4ee3\u5668\uff0c\u6bcf\u6b21\u8fd4\u56de\u4e00\u4e2a\u5b57\u5178\uff0c\u5176\u4e2dkey\u4e3a\u4ee3\u7801\uff0cvalue\u4e3a\u884c\u60c5\u6570\u636e \"\"\" closed_end = tf . floor ( end , frame_type ) n = tf . count_frames ( start , closed_end , frame_type ) max_query_size = min ( cfg . influxdb . max_query_size , INFLUXDB_MAX_QUERY_SIZE ) batch_size = max ( 1 , max_query_size // n ) ff = tf . first_min_frame ( datetime . datetime . now (), frame_type ) for i in range ( 0 , len ( codes ), batch_size ): batch_codes = codes [ i : i + batch_size ] if end < ff : part1 = await cls . _batch_get_persisted_bars_in_range ( batch_codes , frame_type , start , end ) part2 = pd . DataFrame ([], columns = bars_dtype_with_code . names ) elif start >= ff : part1 = pd . DataFrame ([], columns = bars_dtype_with_code . names ) n = tf . count_frames ( start , closed_end , frame_type ) + 1 cached = await cls . _batch_get_cached_bars_n ( frame_type , n , end , batch_codes ) cached = cached [ cached [ \"frame\" ] >= start ] part2 = pd . DataFrame ( cached , columns = bars_dtype_with_code . names ) else : part1 = await cls . _batch_get_persisted_bars_in_range ( batch_codes , frame_type , start , ff ) n = tf . count_frames ( start , closed_end , frame_type ) + 1 cached = await cls . _batch_get_cached_bars_n ( frame_type , n , end , batch_codes ) part2 = pd . DataFrame ( cached , columns = bars_dtype_with_code . names ) df = pd . concat ([ part1 , part2 ]) for code in batch_codes : filtered = df [ df [ \"code\" ] == code ][ bars_cols ] bars = filtered . to_records ( index = False ) . astype ( bars_dtype ) if fq : bars = cls . qfq ( bars ) yield code , bars @classmethod async def batch_get_day_level_bars_in_range ( cls , codes : List [ str ], frame_type : FrameType , start : Frame , end : Frame , fq : bool = True , ) -> Generator [ Dict [ str , BarsArray ], None , None ]: \"\"\"\u83b7\u53d6\u591a\u652f\u80a1\u7968\uff08\u6307\u6570\uff09\u5728[start, end)\u65f6\u95f4\u6bb5\u5185\u7684\u884c\u60c5\u6570\u636e \u5982\u679c\u8981\u83b7\u53d6\u7684\u884c\u60c5\u6570\u636e\u662f\u65e5\u7ebf\u7ea7\u522b\uff08\u53731d, 1w, 1M)\uff0c\u4f7f\u7528\u672c\u63a5\u53e3\u3002 \u505c\u724c\u6570\u636e\u5904\u7406\u8bf7\u89c1[get_bars][omicron.models.stock.Stock.get_bars]\u3002 \u672c\u51fd\u6570\u8fd4\u56de\u4e00\u4e2a\u8fed\u4ee3\u5668\uff0c\u4f7f\u7528\u65b9\u6cd5\u793a\u4f8b\uff1a ``` async for code, bars in Stock.batch_get_day_level_bars_in_range(...): print(code, bars) ``` \u5982\u679c`end`\u4e0d\u5728`frame_type`\u6240\u5c5e\u7684\u8fb9\u754c\u70b9\u4e0a\uff0c\u90a3\u4e48\uff0c\u5982\u679c`end`\u5927\u4e8e\u7b49\u4e8e\u5f53\u524d\u7f13\u5b58\u672a\u6536\u76d8\u6570\u636e\u65f6\u95f4\uff0c\u5219\u5c06\u5305\u542b\u672a\u6536\u76d8\u6570\u636e\uff1b\u5426\u5219\uff0c\u8fd4\u56de\u7684\u8bb0\u5f55\u5c06\u622a\u6b62\u5230`tf.floor(end, frame_type)`\u3002 Args: codes: \u4ee3\u7801\u5217\u8868 frame_type: \u5e27\u7c7b\u578b start: \u8d77\u59cb\u65f6\u95f4 end: \u7ed3\u675f\u65f6\u95f4 fq: \u662f\u5426\u8fdb\u884c\u590d\u6743\uff0c\u5982\u679c\u662f\uff0c\u5219\u8fdb\u884c\u524d\u590d\u6743\u3002Defaults to True. Returns: Generator[Dict[str, BarsArray], None, None]: \u8fed\u4ee3\u5668\uff0c\u6bcf\u6b21\u8fd4\u56de\u4e00\u4e2a\u5b57\u5178\uff0c\u5176\u4e2dkey\u4e3a\u4ee3\u7801\uff0cvalue\u4e3a\u884c\u60c5\u6570\u636e \"\"\" today = datetime . datetime . now () . 
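Both batch_get_*_bars_in_range methods are async generators, as their docstrings show. A sketch of the minute-level variant; FrameType.MIN30, the coretypes import and the concrete time range are assumptions for illustration:

```python
import datetime
from coretypes import FrameType
from omicron.models.stock import Stock

async def scan(codes):
    start = datetime.datetime(2023, 10, 9, 10, 0)
    end = datetime.datetime(2023, 10, 13, 15, 0)
    async for code, bars in Stock.batch_get_min_level_bars_in_range(
        codes, FrameType.MIN30, start, end, fq=True
    ):
        print(code, len(bars), bars["close"][-1] if bars.size else None)
```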
date () # \u65e5\u7ebf\uff0cend\u4e0d\u7b49\u4e8e\u6700\u540e\u4ea4\u6613\u65e5\uff0c\u6b64\u65f6\u5df2\u65e0\u7f13\u5b58 if frame_type == FrameType . DAY and end == tf . floor ( today , frame_type ): from_cache = True elif frame_type != FrameType . DAY and start > tf . floor ( today , frame_type ): from_cache = True else : from_cache = False n = tf . count_frames ( start , end , frame_type ) max_query_size = min ( cfg . influxdb . max_query_size , INFLUXDB_MAX_QUERY_SIZE ) batch_size = max ( max_query_size // n , 1 ) for i in range ( 0 , len ( codes ), batch_size ): batch_codes = codes [ i : i + batch_size ] persisted = await cls . _batch_get_persisted_bars_in_range ( batch_codes , frame_type , start , end ) if from_cache : cached = await cls . _batch_get_cached_bars_n ( frame_type , 1 , end , batch_codes ) cached = pd . DataFrame ( cached , columns = bars_dtype_with_code . names ) df = pd . concat ([ persisted , cached ]) else : df = persisted for code in batch_codes : filtered = df [ df [ \"code\" ] == code ][ bars_cols ] bars = filtered . to_records ( index = False ) . astype ( bars_dtype ) if fq : bars = cls . qfq ( bars ) yield code , bars @classmethod async def get_bars_in_range ( cls , code : str , frame_type : FrameType , start : Frame , end : Frame = None , fq = True , unclosed = True , ) -> BarsArray : \"\"\"\u83b7\u53d6\u6307\u5b9a\u8bc1\u5238\uff08`code`\uff09\u5728[`start`, `end`]\u671f\u95f4\u5e27\u7c7b\u578b\u4e3a`frame_type`\u7684\u884c\u60c5\u6570\u636e\u3002 Args: code : \u8bc1\u5238\u4ee3\u7801 frame_type : \u884c\u60c5\u6570\u636e\u7684\u5e27\u7c7b\u578b start : \u8d77\u59cb\u65f6\u95f4 end : \u7ed3\u675f\u65f6\u95f4,\u5982\u679c\u4e3aNone\uff0c\u5219\u8868\u660e\u53d6\u5230\u5f53\u524d\u65f6\u95f4\u3002 fq : \u662f\u5426\u5bf9\u884c\u60c5\u6570\u636e\u6267\u884c\u524d\u590d\u6743\u64cd\u4f5c unclosed : \u662f\u5426\u5305\u542b\u672a\u6536\u76d8\u7684\u6570\u636e \"\"\" now = datetime . datetime . now () if frame_type in tf . day_level_frames : end = end or now . date () if unclosed and tf . day_shift ( end , 0 ) == now . date (): part2 = await cls . _get_cached_bars_n ( code , 1 , frame_type ) else : part2 = np . array ([], dtype = bars_dtype ) # get rest from persisted part1 = await cls . _get_persisted_bars_in_range ( code , frame_type , start , end ) bars = np . concatenate (( part1 , part2 )) else : end = end or now closed_end = tf . floor ( end , frame_type ) ff_min1 = tf . first_min_frame ( now , FrameType . MIN1 ) if tf . day_shift ( end , 0 ) < now . date () or end < ff_min1 : part1 = await cls . _get_persisted_bars_in_range ( code , frame_type , start , end ) part2 = np . array ([], dtype = bars_dtype ) elif start >= ff_min1 : # all in cache part1 = np . array ([], dtype = bars_dtype ) n = tf . count_frames ( start , closed_end , frame_type ) + 1 part2 = await cls . _get_cached_bars_n ( code , n , frame_type , end ) part2 = part2 [ part2 [ \"frame\" ] >= start ] else : # in both cache and persisted ff = tf . first_min_frame ( now , frame_type ) part1 = await cls . _get_persisted_bars_in_range ( code , frame_type , start , ff ) n = tf . count_frames ( ff , closed_end , frame_type ) + 1 part2 = await cls . _get_cached_bars_n ( code , n , frame_type , end ) if not unclosed : part2 = part2 [ part2 [ \"frame\" ] <= closed_end ] bars = np . concatenate (( part1 , part2 )) if fq : return cls . 
qfq ( bars ) else : return bars @classmethod async def get_bars ( cls , code : str , n : int , frame_type : FrameType , end : Frame = None , fq = True , unclosed = True , ) -> BarsArray : \"\"\"\u83b7\u53d6\u5230`end`\u4e3a\u6b62\u7684`n`\u4e2a\u884c\u60c5\u6570\u636e\u3002 \u8fd4\u56de\u7684\u6570\u636e\u662f\u6309\u7167\u65f6\u95f4\u987a\u5e8f\u9012\u589e\u6392\u5e8f\u7684\u3002\u5728\u9047\u5230\u505c\u724c\u7684\u60c5\u51b5\u65f6\uff0c\u8be5\u65f6\u6bb5\u6570\u636e\u5c06\u88ab\u8df3\u8fc7\uff0c\u56e0\u6b64\u8fd4\u56de\u7684\u8bb0\u5f55\u53ef\u80fd\u4e0d\u662f\u4ea4\u6613\u65e5\u8fde\u7eed\u7684\uff0c\u5e76\u4e14\u53ef\u80fd\u4e0d\u8db3`n`\u4e2a\u3002 \u5982\u679c\u7cfb\u7edf\u5f53\u524d\u6ca1\u6709\u5230\u6307\u5b9a\u65f6\u95f4`end`\u7684\u6570\u636e\uff0c\u5c06\u5c3d\u6700\u5927\u52aa\u529b\u8fd4\u56de\u6570\u636e\u3002\u8c03\u7528\u8005\u53ef\u4ee5\u901a\u8fc7\u5224\u65ad\u6700\u540e\u4e00\u6761\u6570\u636e\u7684\u65f6\u95f4\u662f\u5426\u7b49\u4e8e`end`\u6765\u5224\u65ad\u662f\u5426\u83b7\u53d6\u5230\u4e86\u5168\u90e8\u6570\u636e\u3002 Args: code: \u8bc1\u5238\u4ee3\u7801 n: \u8bb0\u5f55\u6570 frame_type: \u5e27\u7c7b\u578b end: \u622a\u6b62\u65f6\u95f4,\u5982\u679c\u672a\u6307\u660e\uff0c\u5219\u53d6\u5f53\u524d\u65f6\u95f4 fq: \u662f\u5426\u5bf9\u8fd4\u56de\u8bb0\u5f55\u8fdb\u884c\u590d\u6743\u3002\u5982\u679c\u4e3a`True`\u7684\u8bdd\uff0c\u5219\u8fdb\u884c\u524d\u590d\u6743\u3002Defaults to True. unclosed: \u662f\u5426\u5305\u542b\u6700\u65b0\u672a\u6536\u76d8\u7684\u6570\u636e\uff1f Defaults to True. Returns: \u8fd4\u56dedtype\u4e3a`coretypes.bars_dtype`\u7684\u4e00\u7ef4numpy\u6570\u7ec4\u3002 \"\"\" now = datetime . datetime . now () try : cached = np . array ([], dtype = bars_dtype ) if frame_type in tf . day_level_frames : if end is None : end = now . date () elif type ( end ) == datetime . datetime : end = end . date () n0 = n if unclosed : cached = await cls . _get_cached_bars_n ( code , 1 , frame_type ) if cached . size > 0 : # \u5982\u679c\u7f13\u5b58\u7684\u672a\u6536\u76d8\u65e5\u671f > end\uff0c\u5219\u8be5\u7f13\u5b58\u4e0d\u662f\u9700\u8981\u7684 if cached [ 0 ][ \"frame\" ] . item () . date () > end : cached = np . array ([], dtype = bars_dtype ) else : n0 = n - 1 else : end = end or now closed_frame = tf . floor ( end , frame_type ) # fetch one more bar, in case we should discard unclosed bar cached = await cls . _get_cached_bars_n ( code , n + 1 , frame_type , end ) if not unclosed : cached = cached [ cached [ \"frame\" ] <= closed_frame ] # n bars we need fetch from persisted db n0 = n - cached . size if n0 > 0 : if cached . size > 0 : end0 = cached [ 0 ][ \"frame\" ] . item () else : end0 = end bars = await cls . _get_persisted_bars_n ( code , frame_type , n0 , end0 ) merged = np . concatenate (( bars , cached )) bars = merged [ - n :] else : bars = cached [ - n :] if fq : bars = cls . qfq ( bars ) return bars except Exception as e : logger . exception ( e ) logger . 
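get_bars, documented above, returns up to n bars ending at end and silently skips suspended sessions, so the result may hold fewer than n records. A minimal sketch; the FrameType import from coretypes is assumed, as elsewhere in omicron:

```python
from coretypes import FrameType
from omicron.models.stock import Stock

async def recent_daily_bars(code: str):
    bars = await Stock.get_bars(code, 20, FrameType.DAY, fq=True, unclosed=False)
    # bars is a 1-D numpy array of dtype coretypes.bars_dtype, ascending by frame
    if bars.size:
        print(bars[-1]["frame"], bars[-1]["close"], bars[-1]["volume"])
```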
warning ( \"failed to get bars for %s , %s , %s , %s \" , code , n , frame_type , end ) raise @classmethod async def _get_persisted_bars_in_range ( cls , code : str , frame_type : FrameType , start : Frame , end : Frame = None ) -> BarsArray : \"\"\"\u4ece\u6301\u4e45\u5316\u6570\u636e\u5e93\u4e2d\u83b7\u53d6\u4ecb\u4e8e[`start`, `end`]\u95f4\u7684\u884c\u60c5\u8bb0\u5f55 \u5982\u679c`start`\u5230`end`\u533a\u95f4\u67d0\u652f\u80a1\u7968\u505c\u724c\uff0c\u5219\u4f1a\u8fd4\u56de\u7a7a\u6570\u7ec4\u3002 Args: code: \u8bc1\u5238\u4ee3\u7801 frame_type: \u5e27\u7c7b\u578b start: \u8d77\u59cb\u65f6\u95f4 end: \u7ed3\u675f\u65f6\u95f4\uff0c\u5982\u679c\u672a\u6307\u660e\uff0c\u5219\u53d6\u5f53\u524d\u65f6\u95f4 Returns: \u8fd4\u56dedtype\u4e3a`coretypes.bars_dtype`\u7684\u4e00\u7ef4numpy\u6570\u7ec4\u3002 \"\"\" end = end or datetime . datetime . now () keep_cols = [ \"_time\" ] + list ( bars_cols [ 1 :]) measurement = cls . _measurement_name ( frame_type ) flux = ( Flux () . bucket ( cfg . influxdb . bucket_name ) . range ( start , end ) . measurement ( measurement ) . fields ( keep_cols ) . tags ({ \"code\" : code }) ) serializer = DataframeDeserializer ( encoding = \"utf-8\" , names = [ \"_\" , \"table\" , \"result\" , \"frame\" , \"code\" , \"amount\" , \"close\" , \"factor\" , \"high\" , \"low\" , \"open\" , \"volume\" , ], engine = \"c\" , skiprows = 0 , header = 0 , usecols = bars_cols , parse_dates = [ \"frame\" ], ) client = get_influx_client () result = await client . query ( flux , serializer ) return result . to_records ( index = False ) . astype ( bars_dtype ) @classmethod async def _get_persisted_bars_n ( cls , code : str , frame_type : FrameType , n : int , end : Frame = None ) -> BarsArray : \"\"\"\u4ece\u6301\u4e45\u5316\u6570\u636e\u5e93\u4e2d\u83b7\u53d6\u622a\u6b62\u5230`end`\u7684`n`\u6761\u884c\u60c5\u8bb0\u5f55 \u5982\u679c`end`\u672a\u6307\u5b9a\uff0c\u5219\u53d6\u5f53\u524d\u65f6\u95f4\u3002 \u57fa\u4e8einfluxdb\u67e5\u8be2\u7684\u7279\u6027\uff0c\u5728\u67e5\u8be2\u524d\uff0c\u5fc5\u987b\u5148\u6839\u636e`end`\u548c`n`\u8ba1\u7b97\u51fa\u8d77\u59cb\u65f6\u95f4\uff0c\u4f46\u5982\u679c\u5728\u6b64\u671f\u95f4\u67d0\u4e9b\u80a1\u7968\u6709\u505c\u724c\uff0c\u5219\u65e0\u6cd5\u8fd4\u56de\u7684\u6570\u636e\u5c06\u5c0f\u4e8e`n`\u3002\u800c\u5982\u679c\u8d77\u59cb\u65f6\u95f4\u8bbe\u7f6e\u5f97\u8db3\u591f\u65e9\uff0c\u867d\u7136\u80fd\u6ee1\u8db3\u8fd4\u56de\u6570\u636e\u6761\u6570\u7684\u8981\u6c42\uff0c\u4f46\u4f1a\u5e26\u6765\u6027\u80fd\u4e0a\u7684\u635f\u5931\u3002\u56e0\u6b64\uff0c\u6211\u4eec\u5728\u8ba1\u7b97\u8d77\u59cb\u65f6\u95f4\u65f6\uff0c\u4e0d\u662f\u4f7f\u7528`n`\u6765\u8ba1\u7b97\uff0c\u800c\u662f\u4f7f\u7528\u4e86`min(n * 2, n + 20)`\u6765\u8ba1\u7b97\u8d77\u59cb\u65f6\u95f4\uff0c\u8fd9\u6837\u591a\u6570\u60c5\u51b5\u4e0b\uff0c\u80fd\u591f\u4fdd\u8bc1\u8fd4\u56de\u6570\u636e\u7684\u6761\u6570\u4e3a`n`\u6761\u3002 \u8fd4\u56de\u7684\u6570\u636e\u6309`frame`\u8fdb\u884c\u5347\u5e8f\u6392\u5217\u3002 Args: code: \u8bc1\u5238\u4ee3\u7801 frame_type: \u5e27\u7c7b\u578b n: \u8fd4\u56de\u7ed3\u679c\u6570\u91cf end: \u7ed3\u675f\u65f6\u95f4\uff0c\u5982\u679c\u672a\u6307\u660e\uff0c\u5219\u53d6\u5f53\u524d\u65f6\u95f4 Returns: \u8fd4\u56dedtype\u4e3a`bars_dtype`\u7684numpy\u6570\u7ec4 \"\"\" # check is needed since tags accept List as well assert isinstance ( code , str ), \"`code` must be a string\" end = end or datetime . datetime . now () closed_end = tf . floor ( end , frame_type ) start = tf . 
shift ( closed_end , - min ( 2 * n , n + 20 ), frame_type ) keep_cols = [ \"_time\" ] + list ( bars_cols [ 1 :]) measurement = cls . _measurement_name ( frame_type ) flux = ( Flux () . bucket ( cfg . influxdb . bucket_name ) . range ( start , end ) . measurement ( measurement ) . fields ( keep_cols ) . tags ({ \"code\" : code }) . latest ( n ) ) serializer = DataframeDeserializer ( encoding = \"utf-8\" , names = [ \"_\" , \"table\" , \"result\" , \"frame\" , \"code\" , \"amount\" , \"close\" , \"factor\" , \"high\" , \"low\" , \"open\" , \"volume\" , ], engine = \"c\" , skiprows = 0 , header = 0 , usecols = bars_cols , parse_dates = [ \"frame\" ], ) client = get_influx_client () result = await client . query ( flux , serializer ) return result . to_records ( index = False ) . astype ( bars_dtype ) @classmethod async def _batch_get_persisted_bars_n ( cls , codes : List [ str ], frame_type : FrameType , n : int , end : Frame = None ) -> pd . DataFrame : \"\"\"\u4ece\u6301\u4e45\u5316\u5b58\u50a8\u4e2d\u83b7\u53d6`codes`\u6307\u5b9a\u7684\u4e00\u6279\u80a1\u7968\u622a\u6b62`end`\u65f6\u7684`n`\u6761\u8bb0\u5f55\u3002 \u8fd4\u56de\u7684\u6570\u636e\u6309`frame`\u8fdb\u884c\u5347\u5e8f\u6392\u5217\u3002\u5982\u679c\u4e0d\u5b58\u5728\u6ee1\u8db3\u6307\u5b9a\u6761\u4ef6\u7684\u67e5\u8be2\u7ed3\u679c\uff0c\u5c06\u8fd4\u56de\u7a7a\u7684DataFrame\u3002 \u57fa\u4e8einfluxdb\u67e5\u8be2\u7684\u7279\u6027\uff0c\u5728\u67e5\u8be2\u524d\uff0c\u5fc5\u987b\u5148\u6839\u636e`end`\u548c`n`\u8ba1\u7b97\u51fa\u8d77\u59cb\u65f6\u95f4\uff0c\u4f46\u5982\u679c\u5728\u6b64\u671f\u95f4\u67d0\u4e9b\u80a1\u7968\u6709\u505c\u724c\uff0c\u5219\u65e0\u6cd5\u8fd4\u56de\u7684\u6570\u636e\u5c06\u5c0f\u4e8e`n`\u3002\u5982\u679c\u8d77\u59cb\u65f6\u95f4\u8bbe\u7f6e\u7684\u8db3\u591f\u65e9\uff0c\u867d\u7136\u80fd\u6ee1\u8db3\u8fd4\u56de\u6570\u636e\u6761\u6570\u7684\u8981\u6c42\uff0c\u4f46\u4f1a\u5e26\u6765\u6027\u80fd\u4e0a\u7684\u635f\u5931\u3002\u56e0\u6b64\uff0c\u6211\u4eec\u5728\u8ba1\u7b97\u8d77\u59cb\u65f6\u95f4\u65f6\uff0c\u4e0d\u662f\u4f7f\u7528`n`\u6765\u8ba1\u7b97\uff0c\u800c\u662f\u4f7f\u7528\u4e86`min(n * 2, n + 20)`\u6765\u8ba1\u7b97\u8d77\u59cb\u65f6\u95f4\uff0c\u8fd9\u6837\u591a\u6570\u60c5\u51b5\u4e0b\uff0c\u80fd\u591f\u4fdd\u8bc1\u8fd4\u56de\u6570\u636e\u7684\u6761\u6570\u4e3a`n`\u6761\u3002 Args: codes: \u8bc1\u5238\u4ee3\u7801\u5217\u8868\u3002 frame_type: \u5e27\u7c7b\u578b n: \u8fd4\u56de\u7ed3\u679c\u6570\u91cf end: \u7ed3\u675f\u65f6\u95f4\uff0c\u5982\u679c\u672a\u6307\u5b9a\uff0c\u5219\u4f7f\u7528\u5f53\u524d\u65f6\u95f4 Returns: DataFrame, columns\u4e3a`code`, `frame`, `open`, `high`, `low`, `close`, `volume`, `amount`, `factor` \"\"\" max_query_size = min ( cfg . influxdb . max_query_size , INFLUXDB_MAX_QUERY_SIZE ) if len ( codes ) * min ( n + 20 , 2 * n ) > max_query_size : raise BadParameterError ( f \"codes\u7684\u6570\u91cf\u548cn\u7684\u4e58\u79ef\u8d85\u8fc7\u4e86influxdb\u7684\u6700\u5927\u67e5\u8be2\u6570\u91cf\u9650\u5236 { max_query_size } \" ) end = end or datetime . datetime . now () close_end = tf . floor ( end , frame_type ) begin = tf . 
shift ( close_end , - 1 * min ( n + 20 , n * 2 ), frame_type ) # influxdb\u7684\u67e5\u8be2\u7ed3\u679c\u683c\u5f0f\u7c7b\u4f3c\u4e8eCSV\uff0c\u5176\u5217\u987a\u5e8f\u4e3a_, result_alias, table_seq, _time, tags, fields,\u5176\u4e2dtags\u548cfields\u90fd\u662f\u5347\u5e8f\u6392\u5217 keep_cols = [ \"code\" ] + list ( bars_cols ) names = [ \"_\" , \"result\" , \"table\" , \"frame\" , \"code\" ] # influxdb will return fields in the order of name ascending parallel names . extend ( sorted ( bars_cols [ 1 :])) measurement = cls . _measurement_name ( frame_type ) flux = ( Flux () . bucket ( cfg . influxdb . bucket_name ) . range ( begin , end ) . measurement ( measurement ) . fields ( keep_cols ) . latest ( n ) ) if codes is not None : assert isinstance ( codes , list ), \"`codes` must be a list or None\" flux . tags ({ \"code\" : codes }) deserializer = DataframeDeserializer ( names = names , usecols = keep_cols , encoding = \"utf-8\" , time_col = \"frame\" , engine = \"c\" , ) client = get_influx_client () return await client . query ( flux , deserializer ) @classmethod async def _batch_get_persisted_bars_in_range ( cls , codes : List [ str ], frame_type : FrameType , begin : Frame , end : Frame = None ) -> pd . DataFrame : \"\"\"\u4ece\u6301\u4e45\u5316\u5b58\u50a8\u4e2d\u83b7\u53d6`codes`\u6307\u5b9a\u7684\u4e00\u6279\u80a1\u7968\u5728`begin`\u548c`end`\u4e4b\u95f4\u7684\u8bb0\u5f55\u3002 \u8fd4\u56de\u7684\u6570\u636e\u5c06\u6309`frame`\u8fdb\u884c\u5347\u5e8f\u6392\u5217\u3002 \u6ce8\u610f\uff0c\u8fd4\u56de\u7684\u6570\u636e\u6709\u53ef\u80fd\u4e0d\u662f\u7b49\u957f\u7684\uff0c\u56e0\u4e3a\u6709\u7684\u80a1\u7968\u53ef\u80fd\u505c\u724c\u3002 Args: codes: \u8bc1\u5238\u4ee3\u7801\u5217\u8868\u3002 frame_type: \u5e27\u7c7b\u578b begin: \u5f00\u59cb\u65f6\u95f4 end: \u7ed3\u675f\u65f6\u95f4 Returns: DataFrame, columns\u4e3a`code`, `frame`, `open`, `high`, `low`, `close`, `volume`, `amount`, `factor` \"\"\" end = end or datetime . datetime . now () n = tf . count_frames ( begin , end , frame_type ) max_query_size = min ( cfg . influxdb . max_query_size , INFLUXDB_MAX_QUERY_SIZE ) if len ( codes ) * n > max_query_size : raise BadParameterError ( f \"asked records is { len ( codes ) * n } , which is too large than { max_query_size } \" ) # influxdb\u7684\u67e5\u8be2\u7ed3\u679c\u683c\u5f0f\u7c7b\u4f3c\u4e8eCSV\uff0c\u5176\u5217\u987a\u5e8f\u4e3a_, result_alias, table_seq, _time, tags, fields,\u5176\u4e2dtags\u548cfields\u90fd\u662f\u5347\u5e8f\u6392\u5217 keep_cols = [ \"code\" ] + list ( bars_cols ) names = [ \"_\" , \"result\" , \"table\" , \"frame\" , \"code\" ] # influxdb will return fields in the order of name ascending parallel names . extend ( sorted ( bars_cols [ 1 :])) measurement = cls . _measurement_name ( frame_type ) flux = ( Flux () . bucket ( cfg . influxdb . bucket_name ) . range ( begin , end ) . measurement ( measurement ) . fields ( keep_cols ) ) flux . tags ({ \"code\" : codes }) deserializer = DataframeDeserializer ( names = names , usecols = keep_cols , encoding = \"utf-8\" , time_col = \"frame\" , engine = \"c\" , ) client = get_influx_client () df = await client . 
query ( flux , deserializer ) return df @classmethod async def batch_cache_bars ( cls , frame_type : FrameType , bars : Dict [ str , BarsArray ]): \"\"\"\u7f13\u5b58\u5df2\u6536\u76d8\u7684\u5206\u949f\u7ebf\u548c\u65e5\u7ebf \u5f53\u7f13\u5b58\u65e5\u7ebf\u65f6\uff0c\u4ec5\u9650\u4e8e\u5f53\u65e5\u6536\u76d8\u540e\u7684\u7b2c\u4e00\u6b21\u540c\u6b65\u65f6\u8c03\u7528\u3002 Args: frame_type: \u5e27\u7c7b\u578b bars: \u884c\u60c5\u6570\u636e\uff0c\u5176key\u4e3a\u80a1\u7968\u4ee3\u7801\uff0c\u5176value\u4e3adtype\u4e3a`bars_dtype`\u7684\u4e00\u7ef4numpy\u6570\u7ec4\u3002 Raises: RedisError: \u5982\u679c\u5728\u6267\u884c\u8fc7\u7a0b\u4e2d\u53d1\u751f\u9519\u8bef\uff0c\u5219\u629b\u51fa\u4ee5\u6b64\u5f02\u5e38\u4e3a\u57fa\u7c7b\u7684\u5404\u79cd\u5f02\u5e38\uff0c\u5177\u4f53\u53c2\u8003aioredis\u76f8\u5173\u6587\u6863\u3002 \"\"\" if frame_type == FrameType . DAY : await cls . batch_cache_unclosed_bars ( frame_type , bars ) return pl = cache . security . pipeline () for code , bars in bars . items (): key = f \"bars: { frame_type . value } : { code } \" for bar in bars : frame = tf . time2int ( bar [ \"frame\" ] . item ()) val = [ * bar ] val [ 0 ] = frame pl . hset ( key , frame , \",\" . join ( map ( str , val ))) await pl . execute () @classmethod async def batch_cache_unclosed_bars ( cls , frame_type : FrameType , bars : Dict [ str , BarsArray ] ): # pragma: no cover \"\"\"\u7f13\u5b58\u672a\u6536\u76d8\u76845\u300115\u300130\u300160\u5206\u949f\u7ebf\u53ca\u65e5\u7ebf\u3001\u5468\u7ebf\u3001\u6708\u7ebf Args: frame_type: \u5e27\u7c7b\u578b bars: \u884c\u60c5\u6570\u636e\uff0c\u5176key\u4e3a\u80a1\u7968\u4ee3\u7801\uff0c\u5176value\u4e3adtype\u4e3a`bars_dtype`\u7684\u4e00\u7ef4numpy\u6570\u7ec4\u3002bars\u4e0d\u80fd\u4e3aNone\uff0c\u6216\u8005empty\u3002 Raise: RedisError: \u5982\u679c\u5728\u6267\u884c\u8fc7\u7a0b\u4e2d\u53d1\u751f\u9519\u8bef\uff0c\u5219\u629b\u51fa\u4ee5\u6b64\u5f02\u5e38\u4e3a\u57fa\u7c7b\u7684\u5404\u79cd\u5f02\u5e38\uff0c\u5177\u4f53\u53c2\u8003aioredis\u76f8\u5173\u6587\u6863\u3002 \"\"\" pl = cache . security . pipeline () key = f \"bars: { frame_type . value } :unclosed\" convert = tf . time2int if frame_type in tf . minute_level_frames else tf . date2int for code , bar in bars . items (): val = [ * bar [ 0 ]] val [ 0 ] = convert ( bar [ \"frame\" ][ 0 ] . item ()) # \u65f6\u95f4\u8f6c\u6362 pl . hset ( key , code , \",\" . join ( map ( str , val ))) await pl . execute () @classmethod async def reset_cache ( cls ): \"\"\"\u6e05\u9664\u7f13\u5b58\u7684\u884c\u60c5\u6570\u636e\"\"\" try : for ft in itertools . chain ( tf . minute_level_frames , tf . day_level_frames ): keys = await cache . security . keys ( f \"bars: { ft . value } :*\" ) if keys : await cache . security . delete ( * keys ) finally : cls . _is_cache_empty = True @classmethod def _deserialize_cached_bars ( cls , raw : List [ str ], ft : FrameType ) -> BarsArray : \"\"\"\u4eceredis\u4e2d\u53cd\u5e8f\u5217\u5316\u7f13\u5b58\u7684\u6570\u636e \u5982\u679c`raw`\u7a7a\u6570\u7ec4\u6216\u8005\u5143\u7d20\u4e3a`None`\uff0c\u5219\u8fd4\u56de\u7a7a\u6570\u7ec4\u3002 Args: raw: redis\u4e2d\u7684\u7f13\u5b58\u6570\u636e ft: \u5e27\u7c7b\u578b sort: \u662f\u5426\u9700\u8981\u91cd\u65b0\u6392\u5e8f\uff0c\u7f3a\u7701\u4e3aFalse Returns: BarsArray: \u884c\u60c5\u6570\u636e \"\"\" fix_date = False if ft in tf . minute_level_frames : convert = tf . int2time else : convert = tf . 
int2date fix_date = True recs = [] # it's possible to treat raw as csv and use pandas to parse, however, the performance is 10 times worse than this method for raw_rec in raw : if raw_rec is None : continue f , o , h , l , c , v , m , fac = raw_rec . split ( \",\" ) if fix_date : f = f [: 8 ] recs . append ( ( convert ( f ), float ( o ), float ( h ), float ( l ), float ( c ), float ( v ), float ( m ), float ( fac ), ) ) return np . array ( recs , dtype = bars_dtype ) @classmethod async def _batch_get_cached_bars_n ( cls , frame_type : FrameType , n : int , end : Frame = None , codes : List [ str ] = None ) -> BarsPanel : \"\"\"\u6279\u91cf\u83b7\u53d6\u5728cache\u4e2d\u622a\u6b62`end`\u7684`n`\u4e2abars\u3002 \u5982\u679c`end`\u4e0d\u5728`frame_type`\u6240\u5c5e\u7684\u8fb9\u754c\u70b9\u4e0a\uff0c\u90a3\u4e48\uff0c\u5982\u679c`end`\u5927\u4e8e\u7b49\u4e8e\u5f53\u524d\u7f13\u5b58\u672a\u6536\u76d8\u6570\u636e\u65f6\u95f4\uff0c\u5219\u5c06\u5305\u542b\u672a\u6536\u76d8\u6570\u636e\uff1b\u5426\u5219\uff0c\u8fd4\u56de\u7684\u8bb0\u5f55\u5c06\u622a\u6b62\u5230`tf.floor(end, frame_type)`\u3002 Args: frame_type: \u65f6\u95f4\u5e27\u7c7b\u578b n: \u8fd4\u56de\u8bb0\u5f55\u6761\u6570 codes: \u8bc1\u5238\u4ee3\u7801\u5217\u8868 end: \u622a\u6b62\u65f6\u95f4, \u5982\u679c\u4e3aNone Returns: BarsPanel: \u884c\u60c5\u6570\u636e \"\"\" # \u8c03\u7528\u8005\u81ea\u5df1\u4fdd\u8bc1end\u5728\u7f13\u5b58\u4e2d cols = list ( bars_dtype_with_code . names ) if frame_type in tf . day_level_frames : key = f \"bars: { frame_type . value } :unclosed\" if codes is None : recs = await cache . security . hgetall ( key ) codes = list ( recs . keys ()) recs = recs . values () else : recs = await cache . security . hmget ( key , * codes ) barss = cls . _deserialize_cached_bars ( recs , frame_type ) if barss . size > 0 : if len ( barss ) != len ( codes ): # issue 39, \u5982\u679c\u67d0\u652f\u7968\u5f53\u5929\u505c\u724c\uff0c\u5219\u7f13\u5b58\u4e2d\u5c06\u4e0d\u4f1a\u6709\u5b83\u7684\u8bb0\u5f55\uff0c\u6b64\u65f6\u9700\u8981\u79fb\u9664\u5176\u4ee3\u7801 codes = [ codes [ i ] for i , item in enumerate ( recs ) if item is not None ] barss = numpy_append_fields ( barss , \"code\" , codes , [( \"code\" , \"O\" )]) return barss [ cols ] . astype ( bars_dtype_with_code ) else : return np . array ([], dtype = bars_dtype_with_code ) else : end = end or datetime . datetime . now () close_end = tf . floor ( end , frame_type ) all_bars = [] if codes is None : keys = await cache . security . keys ( f \"bars: { frame_type . value } :*[^unclosed]\" ) codes = [ key . split ( \":\" )[ - 1 ] for key in keys ] else : keys = [ f \"bars: { frame_type . value } : { code } \" for code in codes ] if frame_type != FrameType . MIN1 : unclosed = await cache . security . hgetall ( f \"bars: { frame_type . value } :unclosed\" ) else : unclosed = {} pl = cache . security . pipeline () frames = tf . get_frames_by_count ( close_end , n , frame_type ) for key in keys : pl . hmget ( key , * frames ) all_closed = await pl . execute () for code , raw in zip ( codes , all_closed ): raw . append ( unclosed . get ( code )) barss = cls . _deserialize_cached_bars ( raw , frame_type ) barss = numpy_append_fields ( barss , \"code\" , [ code ] * len ( barss ), [( \"code\" , \"O\" )] ) barss = barss [ cols ] . astype ( bars_dtype_with_code ) all_bars . append ( barss [ barss [ \"frame\" ] <= end ][ - n :]) try : return np . concatenate ( all_bars ) except ValueError as e : logger . exception ( e ) return np . 
array ([], dtype = bars_dtype_with_code ) @classmethod async def _get_cached_bars_n ( cls , code : str , n : int , frame_type : FrameType , end : Frame = None ) -> BarsArray : \"\"\"\u4ece\u7f13\u5b58\u4e2d\u83b7\u53d6\u6307\u5b9a\u4ee3\u7801\u7684\u884c\u60c5\u6570\u636e \u5b58\u53d6\u903b\u8f91\u662f\uff0c\u4ece`end`\u6307\u5b9a\u7684\u65f6\u95f4\u5411\u524d\u53d6`n`\u6761\u8bb0\u5f55\u3002`end`\u4e0d\u5e94\u8be5\u5927\u4e8e\u5f53\u524d\u7cfb\u7edf\u65f6\u95f4\uff0c\u5e76\u4e14\u6839\u636e`end`\u548c`n`\u8ba1\u7b97\u51fa\u6765\u7684\u8d77\u59cb\u65f6\u95f4\u5e94\u8be5\u5728\u7f13\u5b58\u4e2d\u5b58\u5728\u3002\u5426\u5219\uff0c\u4e24\u79cd\u60c5\u51b5\u4e0b\uff0c\u8fd4\u56de\u8bb0\u5f55\u6570\u90fd\u5c06\u5c0f\u4e8e`n`\u3002 \u5982\u679c`end`\u4e0d\u5904\u4e8e`frame_type`\u6240\u5c5e\u7684\u8fb9\u754c\u7ed3\u675f\u4f4d\u7f6e\uff0c\u4e14\u5c0f\u4e8e\u5f53\u524d\u5df2\u7f13\u5b58\u7684\u672a\u6536\u76d8bar\u65f6\u95f4\uff0c\u5219\u4f1a\u8fd4\u56de\u524d\u4e00\u4e2a\u5df2\u6536\u76d8\u7684\u6570\u636e\uff0c\u5426\u5219\uff0c\u8fd4\u56de\u7684\u8bb0\u5f55\u4e2d\u8fd8\u5c06\u5305\u542b\u672a\u6536\u76d8\u7684\u6570\u636e\u3002 args: code: \u8bc1\u5238\u4ee3\u7801\uff0c\u6bd4\u5982000001.XSHE n: \u8fd4\u56de\u8bb0\u5f55\u6761\u6570 frame_type: \u5e27\u7c7b\u578b end: \u7ed3\u675f\u5e27\uff0c\u5982\u679c\u4e3aNone\uff0c\u5219\u53d6\u5f53\u524d\u65f6\u95f4 returns: \u5143\u7d20\u7c7b\u578b\u4e3a`coretypes.bars_dtype`\u7684\u4e00\u7ef4numpy\u6570\u7ec4\u3002\u5982\u679c\u6ca1\u6709\u6570\u636e\uff0c\u5219\u8fd4\u56de\u7a7andarray\u3002 \"\"\" # 50 times faster than arrow.now().floor('second') end = end or datetime . datetime . now () . replace ( second = 0 , microsecond = 0 ) if frame_type in tf . minute_level_frames : cache_start = tf . first_min_frame ( end . date (), frame_type ) closed = tf . floor ( end , frame_type ) frames = ( tf . get_frames ( cache_start , closed , frame_type ))[ - n :] if len ( frames ) == 0 : recs = np . empty ( shape = ( 0 ,), dtype = bars_dtype ) else : key = f \"bars: { frame_type . value } : { code } \" recs = await cache . security . hmget ( key , * frames ) recs = cls . _deserialize_cached_bars ( recs , frame_type ) if closed < end : # for unclosed key = f \"bars: { frame_type . value } :unclosed\" unclosed = await cache . security . hget ( key , code ) unclosed = cls . _deserialize_cached_bars ([ unclosed ], frame_type ) if len ( unclosed ) == 0 : return recs [ - n :] if end < unclosed [ 0 ][ \"frame\" ] . item (): # \u5982\u679cunclosed\u4e3a9:36, \u8c03\u7528\u8005\u8981\u6c42\u53d69:29\u76845m\u6570\u636e\uff0c\u5219\u53d6\u5230\u7684unclosed\u4e0d\u5408\u8981\u6c42\uff0c\u629b\u5f03\u3002\u4f3c\u4e4e\u6ca1\u6709\u66f4\u597d\u7684\u65b9\u6cd5\u68c0\u6d4bend\u4e0eunclosed\u7684\u5173\u7cfb return recs [ - n :] else : bars = np . concatenate (( recs , unclosed )) return bars [ - n :] else : return recs [ - n :] else : # \u65e5\u7ebf\u53ca\u4ee5\u4e0a\u7ea7\u522b\uff0c\u4ec5\u5728\u7f13\u5b58\u4e2d\u5b58\u5728\u672a\u6536\u76d8\u6570\u636e key = f \"bars: { frame_type . value } :unclosed\" rec = await cache . security . hget ( key , code ) return cls . 
_deserialize_cached_bars ([ rec ], frame_type ) @classmethod async def cache_bars ( cls , code : str , frame_type : FrameType , bars : BarsArray ): \"\"\"\u5c06\u5f53\u671f\u5df2\u6536\u76d8\u7684\u884c\u60c5\u6570\u636e\u7f13\u5b58 Note: \u5f53\u524d\u53ea\u7f13\u5b581\u5206\u949f\u6570\u636e\u3002\u5176\u5b83\u5206\u949f\u6570\u636e\uff0c\u90fd\u5728\u8c03\u7528\u65f6\uff0c\u901a\u8fc7resample\u4e34\u65f6\u5408\u6210\u3002 \u884c\u60c5\u6570\u636e\u7f13\u5b58\u5728\u4ee5`bars:{frame_type.value}:{code}`\u4e3akey, {frame}\u4e3afield\u7684hashmap\u4e2d\u3002 Args: code: the full qualified code of a security or index frame_type: frame type of the bars bars: the bars to cache, which is a numpy array of dtype `coretypes.bars_dtype` Raises: RedisError: if redis operation failed, see documentation of aioredis \"\"\" # \u8f6c\u6362\u65f6\u95f4\u4e3aint convert = tf . time2int if frame_type in tf . minute_level_frames else tf . date2int key = f \"bars: { frame_type . value } : { code } \" pl = cache . security . pipeline () for bar in bars : val = [ * bar ] val [ 0 ] = convert ( bar [ \"frame\" ] . item ()) pl . hset ( key , val [ 0 ], \",\" . join ( map ( str , val ))) await pl . execute () @classmethod async def cache_unclosed_bars ( cls , code : str , frame_type : FrameType , bars : BarsArray ): # pragma: no cover \"\"\"\u5c06\u672a\u7ed3\u675f\u7684\u884c\u60c5\u6570\u636e\u7f13\u5b58 \u672a\u7ed3\u675f\u7684\u884c\u60c5\u6570\u636e\u7f13\u5b58\u5728\u4ee5`bars:{frame_type.value}:unclosed`\u4e3akey, {code}\u4e3afield\u7684hashmap\u4e2d\u3002 \u5c3d\u7ba1`bars`\u88ab\u58f0\u660e\u4e3aBarsArray\uff0c\u4f46\u5b9e\u9645\u4e0a\u5e94\u8be5\u53ea\u5305\u542b\u4e00\u4e2a\u5143\u7d20\u3002 Args: code: the full qualified code of a security or index frame_type: frame type of the bars bars: the bars to cache, which is a numpy array of dtype `coretypes.bars_dtype` Raises: RedisError: if redis operation failed, see documentation of aioredis \"\"\" converter = tf . time2int if frame_type in tf . minute_level_frames else tf . date2int assert len ( bars ) == 1 , \"unclosed bars should only have one record\" key = f \"bars: { frame_type . value } :unclosed\" bar = bars [ 0 ] val = [ * bar ] val [ 0 ] = converter ( bar [ \"frame\" ] . item ()) await cache . security . hset ( key , code , \",\" . join ( map ( str , val ))) @classmethod async def persist_bars ( cls , frame_type : FrameType , bars : Union [ Dict [ str , BarsArray ], BarsArray , pd . DataFrame ], ): \"\"\"\u5c06\u884c\u60c5\u6570\u636e\u6301\u4e45\u5316 \u5982\u679c`bars`\u7c7b\u578b\u4e3aDict,\u5219key\u4e3a`code`\uff0cvalue\u4e3a`bars`\u3002\u5982\u679c\u5176\u7c7b\u578b\u4e3aBarsArray\u6216\u8005pd.DataFrame\uff0c\u5219`bars`\u5404\u5217\u5b57\u6bb5\u5e94\u8be5\u4e3a`coretypes.bars_dtype` + (\"code\", \"O\")\u6784\u6210\u3002 Args: frame_type: the frame type of the bars bars: the bars to be persisted Raises: InfluxDBWriteError: if influxdb write failed \"\"\" client = get_influx_client () measurement = cls . _measurement_name ( frame_type ) logger . info ( \"persisting bars to influxdb: %s , %d secs\" , measurement , len ( bars )) if isinstance ( bars , dict ): for code , value in bars . items (): await client . save ( value , measurement , global_tags = { \"code\" : code }, time_key = \"frame\" ) else : await client . 
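A usage sketch (not from the source) for `persist_bars` as documented above. It assumes the usual `omicron.init()`/`omicron.close()` lifecycle, a configured InfluxDB backend, and that `FrameType` is importable from `coretypes`; `day_bars` is a BarsArray you already hold, none is fabricated here:

```python
import asyncio
import omicron
from coretypes import FrameType          # assumed import path
from omicron.models.stock import Stock

async def persist_demo(day_bars):
    """day_bars: a bars_dtype array obtained elsewhere (e.g. from a fetcher)."""
    await omicron.init()
    try:
        # dict form: key is the security code, value is its BarsArray
        await Stock.persist_bars(FrameType.DAY, {"000001.XSHE": day_bars})
    finally:
        await omicron.close()

# asyncio.run(persist_demo(day_bars))    # given an actual BarsArray
```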
save ( bars , measurement , tag_keys = [ \"code\" ], time_key = \"frame\" ) @classmethod def resample ( cls , bars : BarsArray , from_frame : FrameType , to_frame : FrameType ) -> BarsArray : \"\"\"\u5c06\u539f\u6765\u4e3a`from_frame`\u7684\u884c\u60c5\u6570\u636e\u8f6c\u6362\u4e3a`to_frame`\u7684\u884c\u60c5\u6570\u636e \u5982\u679c`to_frame`\u4e3a\u65e5\u7ebf\u6216\u8005\u5206\u949f\u7ea7\u522b\u7ebf\uff0c\u5219`from_frame`\u5fc5\u987b\u4e3a\u5206\u949f\u7ebf\uff1b\u5982\u679c`to_frame`\u4e3a\u5468\u4ee5\u4e0a\u7ea7\u522b\u7ebf\uff0c\u5219`from_frame`\u5fc5\u987b\u4e3a\u65e5\u7ebf\u3002\u5176\u5b83\u7ea7\u522b\u4e4b\u95f4\u7684\u8f6c\u6362\u4e0d\u652f\u6301\u3002 \u5982\u679c`from_frame`\u4e3a1\u5206\u949f\u7ebf\uff0c\u5219\u5fc5\u987b\u4ece9\uff1a31\u8d77\u3002 Args: bars (BarsArray): \u884c\u60c5\u6570\u636e from_frame (FrameType): \u8f6c\u6362\u524d\u7684FrameType to_frame (FrameType): \u8f6c\u6362\u540e\u7684FrameType Returns: BarsArray: \u8f6c\u6362\u540e\u7684\u884c\u60c5\u6570\u636e \"\"\" if from_frame == FrameType . MIN1 : return cls . _resample_from_min1 ( bars , to_frame ) elif from_frame == FrameType . DAY : # pragma: no cover return cls . _resample_from_day ( bars , to_frame ) else : # pragma: no cover raise TypeError ( f \"unsupported from_frame: { from_frame } \" ) @classmethod def _measurement_name ( cls , frame_type ): return f \"stock_bars_ { frame_type . value } \" @classmethod def _resample_from_min1 ( cls , bars : BarsArray , to_frame : FrameType ) -> BarsArray : \"\"\"\u5c06`bars`\u4ece1\u5206\u949f\u7ebf\u8f6c\u6362\u4e3a`to_frame`\u7684\u884c\u60c5\u6570\u636e \u91cd\u91c7\u6837\u540e\u7684\u6570\u636e\u53ea\u5305\u542bframe, open, high, low, close, volume, amount, factor\uff0c\u65e0\u8bba\u4f20\u5165\u6570\u636e\u662f\u5426\u8fd8\u6709\u522b\u7684\u5b57\u6bb5\uff0c\u5b83\u4eec\u90fd\u5c06\u88ab\u4e22\u5f03\u3002 resampling 240\u6839\u5206\u949f\u7ebf\u52305\u5206\u949f\u5927\u7ea6\u9700\u8981100\u5fae\u79d2\u3002 TODO\uff1a \u5982\u679c`bars`\u4e2d\u5305\u542bnan\u600e\u4e48\u5904\u7406\uff1f \"\"\" if bars [ 0 ][ \"frame\" ] . item () . minute != 31 : raise ValueError ( \"resampling from 1min must start from 9:31\" ) if to_frame not in ( FrameType . MIN5 , FrameType . MIN15 , FrameType . MIN30 , FrameType . MIN60 , FrameType . DAY , ): raise ValueError ( f \"unsupported to_frame: { to_frame } \" ) bins_len = { FrameType . MIN5 : 5 , FrameType . MIN15 : 15 , FrameType . MIN30 : 30 , FrameType . MIN60 : 60 , FrameType . DAY : 240 , }[ to_frame ] bins = len ( bars ) // bins_len npart1 = bins * bins_len part1 = bars [: npart1 ] . reshape (( - 1 , bins_len )) part2 = bars [ npart1 :] open_pos = np . arange ( bins ) * bins_len close_pos = np . arange ( 1 , bins + 1 ) * bins_len - 1 if len ( bars ) > bins_len * bins : close_pos = np . append ( close_pos , len ( bars ) - 1 ) resampled = np . empty (( bins + 1 ,), dtype = bars_dtype ) else : resampled = np . empty (( bins ,), dtype = bars_dtype ) resampled [: bins ][ \"open\" ] = bars [ open_pos ][ \"open\" ] resampled [: bins ][ \"high\" ] = np . max ( part1 [ \"high\" ], axis = 1 ) resampled [: bins ][ \"low\" ] = np . min ( part1 [ \"low\" ], axis = 1 ) resampled [: bins ][ \"volume\" ] = np . sum ( part1 [ \"volume\" ], axis = 1 ) resampled [: bins ][ \"amount\" ] = np . sum ( part1 [ \"amount\" ], axis = 1 ) if len ( part2 ): resampled [ - 1 ][ \"open\" ] = part2 [ \"open\" ][ 0 ] resampled [ - 1 ][ \"high\" ] = np . max ( part2 [ \"high\" ]) resampled [ - 1 ][ \"low\" ] = np . 
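To make the binning in `_resample_from_min1` above easier to follow, here is the same scheme on a plain numpy vector of closes: fixed-size bins (5 for MIN5, ..., 240 for DAY), the last value of each bin as that bin's close, and a shorter trailing bin kept as one extra (unfinished) bar. Only `close` is shown and the values are made up:

```python
import numpy as np

bins_len = 5                                  # MIN1 -> MIN5
close = np.arange(1.0, 13.0)                  # 12 one-minute closes: 2 full bins + 2 leftover
bins = len(close) // bins_len
part1 = close[: bins * bins_len].reshape(-1, bins_len)
part2 = close[bins * bins_len:]
resampled_close = np.r_[part1[:, -1], part2[-1:]]   # close = last of each bin (+ trailing bin)
print(resampled_close)                        # [ 5. 10. 12.]
```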
min ( part2 [ \"low\" ]) resampled [ - 1 ][ \"volume\" ] = np . sum ( part2 [ \"volume\" ]) resampled [ - 1 ][ \"amount\" ] = np . sum ( part2 [ \"amount\" ]) cols = [ \"frame\" , \"close\" , \"factor\" ] resampled [ cols ] = bars [ close_pos ][ cols ] if to_frame == FrameType . DAY : resampled [ \"frame\" ] = bars [ - 1 ][ \"frame\" ] . item () . date () return resampled @classmethod def _resample_from_day ( cls , bars : BarsArray , to_frame : FrameType ) -> BarsArray : \"\"\"\u5c06`bars`\u4ece\u65e5\u7ebf\u8f6c\u6362\u6210`to_frame`\u7684\u884c\u60c5\u6570\u636e Args: bars (BarsArray): [description] to_frame (FrameType): [description] Returns: \u8f6c\u6362\u540e\u7684\u884c\u60c5\u6570\u636e \"\"\" rules = { \"frame\" : \"last\" , \"open\" : \"first\" , \"high\" : \"max\" , \"low\" : \"min\" , \"close\" : \"last\" , \"volume\" : \"sum\" , \"amount\" : \"sum\" , \"factor\" : \"last\" , } if to_frame == FrameType . WEEK : freq = \"W-Fri\" elif to_frame == FrameType . MONTH : freq = \"M\" elif to_frame == FrameType . QUARTER : freq = \"Q\" elif to_frame == FrameType . YEAR : freq = \"A\" else : raise ValueError ( f \"unsupported to_frame: { to_frame } \" ) df = pd . DataFrame ( bars ) df . index = pd . to_datetime ( bars [ \"frame\" ]) df = df . resample ( freq ) . agg ( rules ) bars = np . array ( df . to_records ( index = False ), dtype = bars_dtype ) # filter out data like (None, nan, ...) return bars [ np . isfinite ( bars [ \"close\" ])] @classmethod async def _get_price_limit_in_cache ( cls , code : str , begin : datetime . date , end : datetime . date ): date_str = await cache . _security_ . get ( TRADE_PRICE_LIMITS_DATE ) if date_str : date_in_cache = arrow . get ( date_str ) . date () if date_in_cache < begin or date_in_cache > end : return None else : return None dtype = [( \"frame\" , \"O\" ), ( \"high_limit\" , \"f4\" ), ( \"low_limit\" , \"f4\" )] hp = await cache . _security_ . hget ( TRADE_PRICE_LIMITS , f \" { code } .high_limit\" ) lp = await cache . _security_ . hget ( TRADE_PRICE_LIMITS , f \" { code } .low_limit\" ) if hp is None or lp is None : return None else : return np . array ([( date_in_cache , hp , lp )], dtype = dtype ) @classmethod async def get_trade_price_limits ( cls , code : str , begin : Frame , end : Frame ) -> BarsArray : \"\"\"\u4eceinfluxdb\u548ccache\u4e2d\u83b7\u53d6\u4e2a\u80a1\u5728[begin, end]\u4e4b\u95f4\u7684\u6da8\u8dcc\u505c\u4ef7\u3002 \u6da8\u8dcc\u505c\u4ef7\u53ea\u6709\u65e5\u7ebf\u6570\u636e\u624d\u6709\uff0c\u56e0\u6b64\uff0cFrameType\u56fa\u5b9a\u4e3aFrameType.DAY\uff0c \u5f53\u5929\u7684\u6570\u636e\u5b58\u653e\u4e8eredis\uff0c\u5982\u679c\u67e5\u8be2\u65e5\u671f\u5305\u542b\u5f53\u5929\uff08\u4ea4\u6613\u65e5\uff09\uff0c\u4ececache\u4e2d\u8bfb\u53d6\u5e76\u8ffd\u52a0\u5230\u7ed3\u679c\u4e2d Args: code : \u4e2a\u80a1\u4ee3\u7801 begin : \u5f00\u59cb\u65e5\u671f end : \u7ed3\u675f\u65e5\u671f Returns: dtype\u4e3a[('frame', 'O'), ('high_limit', 'f4'), ('low_limit', 'f4')]\u7684numpy\u6570\u7ec4 \"\"\" cols = [ \"_time\" , \"high_limit\" , \"low_limit\" ] dtype = [( \"frame\" , \"O\" ), ( \"high_limit\" , \"f4\" ), ( \"low_limit\" , \"f4\" )] if isinstance ( begin , datetime . datetime ): begin = begin . date () # \u5f3a\u5236\u8f6c\u6362\u4e3adate if isinstance ( end , datetime . datetime ): end = end . date () # \u5f3a\u5236\u8f6c\u6362\u4e3adate data_in_cache = await cls . _get_price_limit_in_cache ( code , begin , end ) client = get_influx_client () measurement = cls . _measurement_name ( FrameType . DAY ) flux = ( Flux () . 
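`_resample_from_day` above leans on pandas; a toy version of the same aggregation (the rules mirror the method's, minus the `frame` column, and the data is fabricated):

```python
import numpy as np
import pandas as pd

rules = {"open": "first", "high": "max", "low": "min",
         "close": "last", "volume": "sum", "amount": "sum", "factor": "last"}
idx = pd.date_range("2023-09-04", periods=10, freq="B")     # two weeks of business days, no holidays assumed
df = pd.DataFrame({"open": np.arange(10.0), "high": np.arange(10.0) + 1.0,
                   "low": np.arange(10.0) - 1.0, "close": np.arange(10.0),
                   "volume": 100.0, "amount": 1000.0, "factor": 1.0}, index=idx)
weekly = df.resample("W-FRI").agg(rules)      # same anchored-Friday offset used for FrameType.WEEK
print(weekly[["close", "volume"]])
```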
bucket ( client . _bucket ) . measurement ( measurement ) . range ( begin , end ) . tags ({ \"code\" : code }) . fields ( cols ) . sort ( \"_time\" ) ) ds = NumpyDeserializer ( dtype , use_cols = cols , converters = { \"_time\" : lambda x : ciso8601 . parse_datetime ( x ) . date ()}, # since we ask parse date in convertors, so we have to disable parse_date parse_date = None , ) result = await client . query ( flux , ds ) if data_in_cache : result = np . concatenate ([ result , data_in_cache ]) return result @classmethod async def reset_price_limits_cache ( cls , cache_only : bool , dt : datetime . date = None ): if cache_only is False : date_str = await cache . _security_ . get ( TRADE_PRICE_LIMITS_DATE ) if not date_str : return # skip clear action if date not found in cache date_in_cache = arrow . get ( date_str ) . date () if dt is None or date_in_cache != dt : # \u66f4\u65b0\u7684\u65f6\u95f4\u548ccache\u7684\u65f6\u95f4\u76f8\u540c\uff0c\u5219\u6e05\u9664cache return # skip clear action await cache . _security_ . delete ( TRADE_PRICE_LIMITS ) await cache . _security_ . delete ( TRADE_PRICE_LIMITS_DATE ) @classmethod async def save_trade_price_limits ( cls , price_limits : LimitPriceOnlyBarsArray , to_cache : bool ): \"\"\"\u4fdd\u5b58\u6da8\u8dcc\u505c\u4ef7 Args: price_limits: \u8981\u4fdd\u5b58\u7684\u6da8\u8dcc\u505c\u4ef7\u683c\u6570\u636e\u3002 to_cache: \u662f\u4fdd\u5b58\u5230\u7f13\u5b58\u4e2d\uff0c\u8fd8\u662f\u4fdd\u5b58\u5230\u6301\u4e45\u5316\u5b58\u50a8\u4e2d \"\"\" if len ( price_limits ) == 0 : return if to_cache : # \u6bcf\u4e2a\u4ea4\u6613\u65e5\u4e0a\u53489\u70b9\u66f4\u65b0\u4e24\u6b21 pl = cache . _security_ . pipeline () for row in price_limits : # .item convert np.float64 to python float pl . hset ( TRADE_PRICE_LIMITS , f \" { row [ 'code' ] } .high_limit\" , row [ \"high_limit\" ] . item (), ) pl . hset ( TRADE_PRICE_LIMITS , f \" { row [ 'code' ] } .low_limit\" , row [ \"low_limit\" ] . item (), ) dt = price_limits [ - 1 ][ \"frame\" ] pl . set ( TRADE_PRICE_LIMITS_DATE , dt . strftime ( \"%Y-%m- %d \" )) await pl . execute () else : # to influxdb\uff0c \u6bcf\u4e2a\u4ea4\u6613\u65e5\u7684\u7b2c\u4e8c\u5929\u65e9\u4e0a2\u70b9\u4fdd\u5b58 client = get_influx_client () await client . save ( price_limits , cls . _measurement_name ( FrameType . DAY ), tag_keys = \"code\" , time_key = \"frame\" , ) @classmethod async def trade_price_limit_flags ( cls , code : str , start : datetime . date , end : datetime . date ) -> Tuple [ List [ bool ]]: \"\"\"\u83b7\u53d6\u4e2a\u80a1\u5728[start, end]\u4e4b\u95f4\u7684\u6da8\u8dcc\u505c\u6807\u5fd7 !!!Note \u672c\u51fd\u6570\u8fd4\u56de\u7684\u5e8f\u5217\u5728\u80a1\u7968\u6709\u505c\u724c\u7684\u60c5\u51b5\u4e0b\uff0c\u5c06\u4e0d\u80fd\u4e0e[start, end]\u4e00\u4e00\u5bf9\u5e94\u3002 Args: code: \u4e2a\u80a1\u4ee3\u7801 start: \u5f00\u59cb\u65e5\u671f end: \u7ed3\u675f\u65e5\u671f Returns: \u6da8\u8dcc\u505c\u6807\u5fd7\u5217\u8868(buy, sell) \"\"\" cols = [ \"_time\" , \"close\" , \"high_limit\" , \"low_limit\" ] client = get_influx_client () measurement = cls . _measurement_name ( FrameType . DAY ) flux = ( Flux () . bucket ( client . _bucket ) . measurement ( measurement ) . range ( start , end ) . tags ({ \"code\" : code }) . fields ( cols ) . 
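A usage sketch (not from the source) for `get_trade_price_limits` as documented above; it assumes omicron is already initialized, and the code and dates are arbitrary examples:

```python
import asyncio
import datetime
from omicron.models.stock import Stock

async def price_limit_demo():
    limits = await Stock.get_trade_price_limits(
        "000001.XSHE", datetime.date(2023, 9, 1), datetime.date(2023, 9, 28)
    )
    # dtype: [("frame", "O"), ("high_limit", "f4"), ("low_limit", "f4")]
    for rec in limits:
        print(rec["frame"], rec["high_limit"], rec["low_limit"])

# asyncio.run(price_limit_demo())
```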
sort ( \"_time\" ) ) dtype = [ ( \"frame\" , \"O\" ), ( \"close\" , \"f4\" ), ( \"high_limit\" , \"f4\" ), ( \"low_limit\" , \"f4\" ), ] ds = NumpyDeserializer ( dtype , use_cols = [ \"_time\" , \"close\" , \"high_limit\" , \"low_limit\" ], converters = { \"_time\" : lambda x : ciso8601 . parse_datetime ( x ) . date ()}, # since we ask parse date in convertors, so we have to disable parse_date parse_date = None , ) result = await client . query ( flux , ds ) if result . size == 0 : return np . array ([], dtype = dtype ) return ( array_price_equal ( result [ \"close\" ], result [ \"high_limit\" ]), array_price_equal ( result [ \"close\" ], result [ \"low_limit\" ]), ) @classmethod async def trade_price_limit_flags_ex ( cls , code : str , start : datetime . date , end : datetime . date ) -> Dict [ datetime . date , Tuple [ bool , bool ]]: \"\"\"\u83b7\u53d6\u80a1\u7968`code`\u5728`[start, end]`\u533a\u95f4\u7684\u6da8\u8dcc\u505c\u6807\u5fd7 !!!Note \u5982\u679cend\u4e3a\u5f53\u5929\uff0c\u6ce8\u610f\u5728\u672a\u6536\u76d8\u4e4b\u524d\uff0c\u8fd9\u4e2a\u6da8\u8dcc\u505c\u6807\u5fd7\u90fd\u662f\u4e0d\u7a33\u5b9a\u7684 Args: code: \u80a1\u7968\u4ee3\u7801 start: \u8d77\u59cb\u65e5\u671f end: \u7ed3\u675f\u65e5\u671f Returns: \u4ee5\u65e5\u671f\u4e3akey\uff0c\uff08\u6da8\u505c\uff0c\u8dcc\u505c\uff09\u4e3a\u503c\u7684dict \"\"\" limit_prices = await cls . get_trade_price_limits ( code , start , end ) bars = await Stock . get_bars_in_range ( code , FrameType . DAY , start = start , end = end , fq = False ) close = bars [ \"close\" ] results = {} # aligned = True for i in range ( len ( bars )): if bars [ i ][ \"frame\" ] . item () . date () != limit_prices [ i ][ \"frame\" ]: # aligned = False logger . warning ( \"\u6570\u636e\u540c\u6b65\u9519\u8bef\uff0c\u6da8\u8dcc\u505c\u4ef7\u683c\u4e0e\u6536\u76d8\u4ef7\u65f6\u95f4\u4e0d\u4e00\u81f4: %s , %s \" , code , bars [ i ][ \"frame\" ]) break results [ limit_prices [ i ][ \"frame\" ]] = ( price_equal ( limit_prices [ i ][ \"high_limit\" ], close [ i ]), price_equal ( limit_prices [ i ][ \"low_limit\" ], close [ i ]), ) # if not aligned: # bars = bars[i:] # limit_prices = limit_prices[i:] # for frame in bars[\"frame\"]: # frame = frame.item().date() # close = bars[bars[\"frame\"].item().date() == frame][\"close\"].item() # high = limit_prices[limit_prices[\"frame\"] == frame][\"high_limit\"].item() # low = limit_prices[limit_prices[\"frame\"] == frame][\"low_limit\"].item() # results[frame] = ( # price_equal(high, close), # price_equal(low, close) # ) return results @classmethod async def get_latest_price ( cls , codes : Iterable [ str ]) -> List [ str ]: \"\"\"\u83b7\u53d6\u591a\u652f\u80a1\u7968\u7684\u6700\u65b0\u4ef7\u683c\uff08\u4ea4\u6613\u65e5\u5f53\u5929\uff09\uff0c\u6682\u4e0d\u5305\u62ec\u6307\u6570 \u4ef7\u683c\u6570\u636e\u6bcf5\u79d2\u66f4\u65b0\u4e00\u6b21\uff0c\u63a5\u53d7\u591a\u53ea\u80a1\u7968\u67e5\u8be2\uff0c\u8fd4\u56de\u6700\u540e\u7f13\u5b58\u7684\u4ef7\u683c Args: codes: \u4ee3\u7801\u5217\u8868 Returns: \u8fd4\u56de\u4e00\u4e2aList\uff0c\u4ef7\u683c\u662f\u5b57\u7b26\u5f62\u5f0f\u7684\u6d6e\u70b9\u6570\u3002 \"\"\" if not codes : return [] _raw_code_list = [] for code_str in codes : code , _ = code_str . split ( \".\" ) _raw_code_list . append ( code ) _converted_data = [] raw_data = await cache . feature . hmget ( TRADE_LATEST_PRICE , * _raw_code_list ) for _data in raw_data : if _data is None : _converted_data . append ( _data ) else : _converted_data . 
append ( float ( _data )) return _converted_data security_type : SecurityType property readonly \u00b6 \u8fd4\u56de\u8bc1\u5238\u7c7b\u578b Returns: Type Description SecurityType [description] batch_cache_bars ( frame_type , bars ) async classmethod \u00b6 \u7f13\u5b58\u5df2\u6536\u76d8\u7684\u5206\u949f\u7ebf\u548c\u65e5\u7ebf \u5f53\u7f13\u5b58\u65e5\u7ebf\u65f6\uff0c\u4ec5\u9650\u4e8e\u5f53\u65e5\u6536\u76d8\u540e\u7684\u7b2c\u4e00\u6b21\u540c\u6b65\u65f6\u8c03\u7528\u3002 Parameters: Name Type Description Default frame_type FrameType \u5e27\u7c7b\u578b required bars Dict[str, numpy.ndarray[Any, numpy.dtype[dtype([('frame', ' Generator [ Dict [ str , BarsArray ], None , None ]: \"\"\"\u83b7\u53d6\u591a\u652f\u80a1\u7968\uff08\u6307\u6570\uff09\u5728[start, end)\u65f6\u95f4\u6bb5\u5185\u7684\u884c\u60c5\u6570\u636e \u5982\u679c\u8981\u83b7\u53d6\u7684\u884c\u60c5\u6570\u636e\u662f\u65e5\u7ebf\u7ea7\u522b\uff08\u53731d, 1w, 1M)\uff0c\u4f7f\u7528\u672c\u63a5\u53e3\u3002 \u505c\u724c\u6570\u636e\u5904\u7406\u8bf7\u89c1[get_bars][omicron.models.stock.Stock.get_bars]\u3002 \u672c\u51fd\u6570\u8fd4\u56de\u4e00\u4e2a\u8fed\u4ee3\u5668\uff0c\u4f7f\u7528\u65b9\u6cd5\u793a\u4f8b\uff1a ``` async for code, bars in Stock.batch_get_day_level_bars_in_range(...): print(code, bars) ``` \u5982\u679c`end`\u4e0d\u5728`frame_type`\u6240\u5c5e\u7684\u8fb9\u754c\u70b9\u4e0a\uff0c\u90a3\u4e48\uff0c\u5982\u679c`end`\u5927\u4e8e\u7b49\u4e8e\u5f53\u524d\u7f13\u5b58\u672a\u6536\u76d8\u6570\u636e\u65f6\u95f4\uff0c\u5219\u5c06\u5305\u542b\u672a\u6536\u76d8\u6570\u636e\uff1b\u5426\u5219\uff0c\u8fd4\u56de\u7684\u8bb0\u5f55\u5c06\u622a\u6b62\u5230`tf.floor(end, frame_type)`\u3002 Args: codes: \u4ee3\u7801\u5217\u8868 frame_type: \u5e27\u7c7b\u578b start: \u8d77\u59cb\u65f6\u95f4 end: \u7ed3\u675f\u65f6\u95f4 fq: \u662f\u5426\u8fdb\u884c\u590d\u6743\uff0c\u5982\u679c\u662f\uff0c\u5219\u8fdb\u884c\u524d\u590d\u6743\u3002Defaults to True. Returns: Generator[Dict[str, BarsArray], None, None]: \u8fed\u4ee3\u5668\uff0c\u6bcf\u6b21\u8fd4\u56de\u4e00\u4e2a\u5b57\u5178\uff0c\u5176\u4e2dkey\u4e3a\u4ee3\u7801\uff0cvalue\u4e3a\u884c\u60c5\u6570\u636e \"\"\" today = datetime . datetime . now () . date () # \u65e5\u7ebf\uff0cend\u4e0d\u7b49\u4e8e\u6700\u540e\u4ea4\u6613\u65e5\uff0c\u6b64\u65f6\u5df2\u65e0\u7f13\u5b58 if frame_type == FrameType . DAY and end == tf . floor ( today , frame_type ): from_cache = True elif frame_type != FrameType . DAY and start > tf . floor ( today , frame_type ): from_cache = True else : from_cache = False n = tf . count_frames ( start , end , frame_type ) max_query_size = min ( cfg . influxdb . max_query_size , INFLUXDB_MAX_QUERY_SIZE ) batch_size = max ( max_query_size // n , 1 ) for i in range ( 0 , len ( codes ), batch_size ): batch_codes = codes [ i : i + batch_size ] persisted = await cls . _batch_get_persisted_bars_in_range ( batch_codes , frame_type , start , end ) if from_cache : cached = await cls . _batch_get_cached_bars_n ( frame_type , 1 , end , batch_codes ) cached = pd . DataFrame ( cached , columns = bars_dtype_with_code . names ) df = pd . concat ([ persisted , cached ]) else : df = persisted for code in batch_codes : filtered = df [ df [ \"code\" ] == code ][ bars_cols ] bars = filtered . to_records ( index = False ) . astype ( bars_dtype ) if fq : bars = cls . 
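A usage sketch for the async generator `batch_get_day_level_bars_in_range` shown above, assuming omicron is initialized and `FrameType` is importable from `coretypes`; codes and dates are arbitrary examples:

```python
import asyncio
import datetime
from coretypes import FrameType           # assumed import path
from omicron.models.stock import Stock

async def batch_day_demo():
    start, end = datetime.date(2023, 9, 1), datetime.date(2023, 9, 28)
    async for code, bars in Stock.batch_get_day_level_bars_in_range(
        ["000001.XSHE", "600000.XSHG"], FrameType.DAY, start, end
    ):
        print(code, len(bars), bars["close"][-1] if len(bars) else None)

# asyncio.run(batch_day_demo())
```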
qfq ( bars ) yield code , bars batch_get_min_level_bars_in_range ( codes , frame_type , start , end , fq = True ) classmethod \u00b6 \u83b7\u53d6\u591a\u652f\u80a1\u7968\uff08\u6307\u6570\uff09\u5728[start, end)\u65f6\u95f4\u6bb5\u5185\u7684\u884c\u60c5\u6570\u636e \u5982\u679c\u8981\u83b7\u53d6\u7684\u884c\u60c5\u6570\u636e\u662f\u5206\u949f\u7ea7\u522b\uff08\u53731m, 5m, 15m, 30m\u548c60m)\uff0c\u4f7f\u7528\u672c\u63a5\u53e3\u3002 \u505c\u724c\u6570\u636e\u5904\u7406\u8bf7\u89c1 get_bars \u3002 \u672c\u51fd\u6570\u8fd4\u56de\u4e00\u4e2a\u8fed\u4ee3\u5668\uff0c\u4f7f\u7528\u65b9\u6cd5\u793a\u4f8b\uff1a 1 2 async for code, bars in Stock.batch_get_min_level_bars_in_range(...): print(code, bars) \u5982\u679c end \u4e0d\u5728 frame_type \u6240\u5c5e\u7684\u8fb9\u754c\u70b9\u4e0a\uff0c\u90a3\u4e48\uff0c\u5982\u679c end \u5927\u4e8e\u7b49\u4e8e\u5f53\u524d\u7f13\u5b58\u672a\u6536\u76d8\u6570\u636e\u65f6\u95f4\uff0c\u5219\u5c06\u5305\u542b\u672a\u6536\u76d8\u6570\u636e\uff1b\u5426\u5219\uff0c\u8fd4\u56de\u7684\u8bb0\u5f55\u5c06\u622a\u6b62\u5230 tf.floor(end, frame_type) \u3002 Parameters: Name Type Description Default codes List[str] \u80a1\u7968/\u6307\u6570\u4ee3\u7801\u5217\u8868 required frame_type FrameType \u5e27\u7c7b\u578b required start Union[datetime.date, datetime.datetime] \u8d77\u59cb\u65f6\u95f4 required end Union[datetime.date, datetime.datetime] \u7ed3\u675f\u65f6\u95f4\u3002\u5982\u679c\u672a\u6307\u660e\uff0c\u5219\u53d6\u5f53\u524d\u65f6\u95f4\u3002 required fq bool \u662f\u5426\u8fdb\u884c\u590d\u6743\uff0c\u5982\u679c\u662f\uff0c\u5219\u8fdb\u884c\u524d\u590d\u6743\u3002Defaults to True. True Returns: Type Description Generator[Dict[str, BarsArray], None, None] \u8fed\u4ee3\u5668\uff0c\u6bcf\u6b21\u8fd4\u56de\u4e00\u4e2a\u5b57\u5178\uff0c\u5176\u4e2dkey\u4e3a\u4ee3\u7801\uff0cvalue\u4e3a\u884c\u60c5\u6570\u636e Source code in omicron/models/stock.py @classmethod async def batch_get_min_level_bars_in_range ( cls , codes : List [ str ], frame_type : FrameType , start : Frame , end : Frame , fq : bool = True , ) -> Generator [ Dict [ str , BarsArray ], None , None ]: \"\"\"\u83b7\u53d6\u591a\u652f\u80a1\u7968\uff08\u6307\u6570\uff09\u5728[start, end)\u65f6\u95f4\u6bb5\u5185\u7684\u884c\u60c5\u6570\u636e \u5982\u679c\u8981\u83b7\u53d6\u7684\u884c\u60c5\u6570\u636e\u662f\u5206\u949f\u7ea7\u522b\uff08\u53731m, 5m, 15m, 30m\u548c60m)\uff0c\u4f7f\u7528\u672c\u63a5\u53e3\u3002 \u505c\u724c\u6570\u636e\u5904\u7406\u8bf7\u89c1[get_bars][omicron.models.stock.Stock.get_bars]\u3002 \u672c\u51fd\u6570\u8fd4\u56de\u4e00\u4e2a\u8fed\u4ee3\u5668\uff0c\u4f7f\u7528\u65b9\u6cd5\u793a\u4f8b\uff1a ``` async for code, bars in Stock.batch_get_min_level_bars_in_range(...): print(code, bars) ``` \u5982\u679c`end`\u4e0d\u5728`frame_type`\u6240\u5c5e\u7684\u8fb9\u754c\u70b9\u4e0a\uff0c\u90a3\u4e48\uff0c\u5982\u679c`end`\u5927\u4e8e\u7b49\u4e8e\u5f53\u524d\u7f13\u5b58\u672a\u6536\u76d8\u6570\u636e\u65f6\u95f4\uff0c\u5219\u5c06\u5305\u542b\u672a\u6536\u76d8\u6570\u636e\uff1b\u5426\u5219\uff0c\u8fd4\u56de\u7684\u8bb0\u5f55\u5c06\u622a\u6b62\u5230`tf.floor(end, frame_type)`\u3002 Args: codes: \u80a1\u7968/\u6307\u6570\u4ee3\u7801\u5217\u8868 frame_type: \u5e27\u7c7b\u578b start: \u8d77\u59cb\u65f6\u95f4 end: \u7ed3\u675f\u65f6\u95f4\u3002\u5982\u679c\u672a\u6307\u660e\uff0c\u5219\u53d6\u5f53\u524d\u65f6\u95f4\u3002 fq: \u662f\u5426\u8fdb\u884c\u590d\u6743\uff0c\u5982\u679c\u662f\uff0c\u5219\u8fdb\u884c\u524d\u590d\u6743\u3002Defaults to True. 
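Both batch generators above cap how many codes go into a single InfluxDB query; a small arithmetic sketch of that sizing (the cap value below is a stand-in, not the library's actual constant):

```python
INFLUXDB_MAX_QUERY_SIZE = 250_000   # stand-in value for the module-level cap
n = 240 * 5                         # e.g. five days of 1-minute frames per code
batch_size = max(1, INFLUXDB_MAX_QUERY_SIZE // n)
print(batch_size)                   # 208 codes per query keeps codes * frames under the cap
```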
Returns: Generator[Dict[str, BarsArray], None, None]: \u8fed\u4ee3\u5668\uff0c\u6bcf\u6b21\u8fd4\u56de\u4e00\u4e2a\u5b57\u5178\uff0c\u5176\u4e2dkey\u4e3a\u4ee3\u7801\uff0cvalue\u4e3a\u884c\u60c5\u6570\u636e \"\"\" closed_end = tf . floor ( end , frame_type ) n = tf . count_frames ( start , closed_end , frame_type ) max_query_size = min ( cfg . influxdb . max_query_size , INFLUXDB_MAX_QUERY_SIZE ) batch_size = max ( 1 , max_query_size // n ) ff = tf . first_min_frame ( datetime . datetime . now (), frame_type ) for i in range ( 0 , len ( codes ), batch_size ): batch_codes = codes [ i : i + batch_size ] if end < ff : part1 = await cls . _batch_get_persisted_bars_in_range ( batch_codes , frame_type , start , end ) part2 = pd . DataFrame ([], columns = bars_dtype_with_code . names ) elif start >= ff : part1 = pd . DataFrame ([], columns = bars_dtype_with_code . names ) n = tf . count_frames ( start , closed_end , frame_type ) + 1 cached = await cls . _batch_get_cached_bars_n ( frame_type , n , end , batch_codes ) cached = cached [ cached [ \"frame\" ] >= start ] part2 = pd . DataFrame ( cached , columns = bars_dtype_with_code . names ) else : part1 = await cls . _batch_get_persisted_bars_in_range ( batch_codes , frame_type , start , ff ) n = tf . count_frames ( start , closed_end , frame_type ) + 1 cached = await cls . _batch_get_cached_bars_n ( frame_type , n , end , batch_codes ) part2 = pd . DataFrame ( cached , columns = bars_dtype_with_code . names ) df = pd . concat ([ part1 , part2 ]) for code in batch_codes : filtered = df [ df [ \"code\" ] == code ][ bars_cols ] bars = filtered . to_records ( index = False ) . astype ( bars_dtype ) if fq : bars = cls . qfq ( bars ) yield code , bars cache_bars ( code , frame_type , bars ) async classmethod \u00b6 \u5c06\u5f53\u671f\u5df2\u6536\u76d8\u7684\u884c\u60c5\u6570\u636e\u7f13\u5b58 Note \u5f53\u524d\u53ea\u7f13\u5b581\u5206\u949f\u6570\u636e\u3002\u5176\u5b83\u5206\u949f\u6570\u636e\uff0c\u90fd\u5728\u8c03\u7528\u65f6\uff0c\u901a\u8fc7resample\u4e34\u65f6\u5408\u6210\u3002 \u884c\u60c5\u6570\u636e\u7f13\u5b58\u5728\u4ee5 bars:{frame_type.value}:{code} \u4e3akey, {frame}\u4e3afield\u7684hashmap\u4e2d\u3002 Parameters: Name Type Description Default code str the full qualified code of a security or index required frame_type FrameType frame type of the bars required bars numpy.ndarray[Any, numpy.dtype[dtype([('frame', ' int : \"\"\"\u83b7\u53d6\u4e0a\u5e02\u4ee5\u6765\u7ecf\u8fc7\u4e86\u591a\u5c11\u4e2a\u4ea4\u6613\u65e5 \u7531\u4e8e\u53d7\u4ea4\u6613\u65e5\u5386\u9650\u5236\uff082005\u5e741\u67084\u65e5\u4e4b\u524d\u7684\u4ea4\u6613\u65e5\u5386\u6ca1\u6709\uff09\uff0c\u5bf9\u4e8e\u5728\u4e4b\u524d\u4e0a\u5e02\u7684\u54c1\u79cd\uff0c\u90fd\u8fd4\u56de\u4ece2005\u5e741\u67084\u65e5\u8d77\u7684\u65e5\u671f\u3002 Returns: int: [description] \"\"\" epoch_start = arrow . get ( \"2005-01-04\" ) . date () ipo_day = self . ipo_date if self . ipo_date > epoch_start else epoch_start return tf . count_day_frames ( ipo_day , arrow . now () . 
date ()) format_code ( code ) staticmethod \u00b6 \u65b0\u4e09\u677f\u548c\u5317\u4ea4\u6240\u7684\u80a1\u7968, \u6682\u4e0d\u652f\u6301, \u9ed8\u8ba4\u8fd4\u56deNone \u4e0a\u8bc1A\u80a1: 600\u3001601\u3001603\u3001605 \u6df1\u8bc1A\u80a1: 000\u3001001 \u4e2d\u5c0f\u677f: 002\u3001003 \u521b\u4e1a\u677f: 300/301 \u79d1\u521b\u677f: 688 \u65b0\u4e09\u677f: 82\u300183\u300187\u300188\u3001430\u3001420\u3001400 \u5317\u4ea4\u6240: 43\u300183\u300187\u300188 Source code in omicron/models/stock.py @staticmethod def format_code ( code ) -> str : \"\"\"\u65b0\u4e09\u677f\u548c\u5317\u4ea4\u6240\u7684\u80a1\u7968, \u6682\u4e0d\u652f\u6301, \u9ed8\u8ba4\u8fd4\u56deNone \u4e0a\u8bc1A\u80a1: 600\u3001601\u3001603\u3001605 \u6df1\u8bc1A\u80a1: 000\u3001001 \u4e2d\u5c0f\u677f: 002\u3001003 \u521b\u4e1a\u677f: 300/301 \u79d1\u521b\u677f: 688 \u65b0\u4e09\u677f: 82\u300183\u300187\u300188\u3001430\u3001420\u3001400 \u5317\u4ea4\u6240: 43\u300183\u300187\u300188 \"\"\" if not code or len ( code ) != 6 : return None prefix = code [ 0 ] if prefix in ( \"0\" , \"3\" ): return f \" { code } .XSHE\" elif prefix == \"6\" : return f \" { code } .XSHG\" else : return None fuzzy_match ( query ) classmethod \u00b6 \u5bf9\u80a1\u7968/\u6307\u6570\u8fdb\u884c\u6a21\u7cca\u5339\u914d\u67e5\u627e query\u53ef\u4ee5\u662f\u80a1\u7968/\u6307\u6570\u4ee3\u7801\uff0c\u4e5f\u53ef\u4ee5\u662f\u5b57\u6bcd\uff08\u6309name\u67e5\u627e\uff09\uff0c\u4e5f\u53ef\u4ee5\u662f\u6c49\u5b57\uff08\u6309\u663e\u793a\u540d\u67e5\u627e\uff09 Parameters: Name Type Description Default query str \u67e5\u8be2\u5b57\u7b26\u4e32 required Returns: Type Description Dict[str, Tuple] \u67e5\u8be2\u7ed3\u679c\uff0c\u5176\u4e2dTuple\u4e3a(code, display_name, name, start, end, type) Source code in omicron/models/stock.py @classmethod def fuzzy_match ( cls , query : str ) -> Dict [ str , Tuple ]: \"\"\"\u5bf9\u80a1\u7968/\u6307\u6570\u8fdb\u884c\u6a21\u7cca\u5339\u914d\u67e5\u627e query\u53ef\u4ee5\u662f\u80a1\u7968/\u6307\u6570\u4ee3\u7801\uff0c\u4e5f\u53ef\u4ee5\u662f\u5b57\u6bcd\uff08\u6309name\u67e5\u627e\uff09\uff0c\u4e5f\u53ef\u4ee5\u662f\u6c49\u5b57\uff08\u6309\u663e\u793a\u540d\u67e5\u627e\uff09 Args: query (str): \u67e5\u8be2\u5b57\u7b26\u4e32 Returns: Dict[str, Tuple]: \u67e5\u8be2\u7ed3\u679c\uff0c\u5176\u4e2dTuple\u4e3a(code, display_name, name, start, end, type) \"\"\" query = query . upper () if re . match ( r \"\\d+\" , query ): return { sec [ \"code\" ]: sec . tolist () for sec in cls . _stocks if sec [ \"code\" ] . startswith ( query ) } elif re . match ( r \"[A-Z]+\" , query ): return { sec [ \"code\" ]: sec . tolist () for sec in cls . _stocks if sec [ \"name\" ] . startswith ( query ) } else : return { sec [ \"code\" ]: sec . tolist () for sec in cls . _stocks if sec [ \"alias\" ] . 
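What `format_code` above produces for the boards it supports; unsupported boards come back as `None`:

```python
from omicron.models.stock import Stock

print(Stock.format_code("600000"))   # "600000.XSHG" -- Shanghai, prefix 6
print(Stock.format_code("000001"))   # "000001.XSHE" -- Shenzhen, prefix 0
print(Stock.format_code("830799"))   # None -- NEEQ/BSE boards not supported yet
```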
find ( query ) != - 1 } get_bars ( code , n , frame_type , end = None , fq = True , unclosed = True ) async classmethod \u00b6 \u83b7\u53d6\u5230 end \u4e3a\u6b62\u7684 n \u4e2a\u884c\u60c5\u6570\u636e\u3002 \u8fd4\u56de\u7684\u6570\u636e\u662f\u6309\u7167\u65f6\u95f4\u987a\u5e8f\u9012\u589e\u6392\u5e8f\u7684\u3002\u5728\u9047\u5230\u505c\u724c\u7684\u60c5\u51b5\u65f6\uff0c\u8be5\u65f6\u6bb5\u6570\u636e\u5c06\u88ab\u8df3\u8fc7\uff0c\u56e0\u6b64\u8fd4\u56de\u7684\u8bb0\u5f55\u53ef\u80fd\u4e0d\u662f\u4ea4\u6613\u65e5\u8fde\u7eed\u7684\uff0c\u5e76\u4e14\u53ef\u80fd\u4e0d\u8db3 n \u4e2a\u3002 \u5982\u679c\u7cfb\u7edf\u5f53\u524d\u6ca1\u6709\u5230\u6307\u5b9a\u65f6\u95f4 end \u7684\u6570\u636e\uff0c\u5c06\u5c3d\u6700\u5927\u52aa\u529b\u8fd4\u56de\u6570\u636e\u3002\u8c03\u7528\u8005\u53ef\u4ee5\u901a\u8fc7\u5224\u65ad\u6700\u540e\u4e00\u6761\u6570\u636e\u7684\u65f6\u95f4\u662f\u5426\u7b49\u4e8e end \u6765\u5224\u65ad\u662f\u5426\u83b7\u53d6\u5230\u4e86\u5168\u90e8\u6570\u636e\u3002 Parameters: Name Type Description Default code str \u8bc1\u5238\u4ee3\u7801 required n int \u8bb0\u5f55\u6570 required frame_type FrameType \u5e27\u7c7b\u578b required end Union[datetime.date, datetime.datetime] \u622a\u6b62\u65f6\u95f4,\u5982\u679c\u672a\u6307\u660e\uff0c\u5219\u53d6\u5f53\u524d\u65f6\u95f4 None fq \u662f\u5426\u5bf9\u8fd4\u56de\u8bb0\u5f55\u8fdb\u884c\u590d\u6743\u3002\u5982\u679c\u4e3a True \u7684\u8bdd\uff0c\u5219\u8fdb\u884c\u524d\u590d\u6743\u3002Defaults to True. True unclosed \u662f\u5426\u5305\u542b\u6700\u65b0\u672a\u6536\u76d8\u7684\u6570\u636e\uff1f Defaults to True. True Returns: Type Description numpy.ndarray[Any, numpy.dtype[dtype([('frame', ' BarsArray : \"\"\"\u83b7\u53d6\u5230`end`\u4e3a\u6b62\u7684`n`\u4e2a\u884c\u60c5\u6570\u636e\u3002 \u8fd4\u56de\u7684\u6570\u636e\u662f\u6309\u7167\u65f6\u95f4\u987a\u5e8f\u9012\u589e\u6392\u5e8f\u7684\u3002\u5728\u9047\u5230\u505c\u724c\u7684\u60c5\u51b5\u65f6\uff0c\u8be5\u65f6\u6bb5\u6570\u636e\u5c06\u88ab\u8df3\u8fc7\uff0c\u56e0\u6b64\u8fd4\u56de\u7684\u8bb0\u5f55\u53ef\u80fd\u4e0d\u662f\u4ea4\u6613\u65e5\u8fde\u7eed\u7684\uff0c\u5e76\u4e14\u53ef\u80fd\u4e0d\u8db3`n`\u4e2a\u3002 \u5982\u679c\u7cfb\u7edf\u5f53\u524d\u6ca1\u6709\u5230\u6307\u5b9a\u65f6\u95f4`end`\u7684\u6570\u636e\uff0c\u5c06\u5c3d\u6700\u5927\u52aa\u529b\u8fd4\u56de\u6570\u636e\u3002\u8c03\u7528\u8005\u53ef\u4ee5\u901a\u8fc7\u5224\u65ad\u6700\u540e\u4e00\u6761\u6570\u636e\u7684\u65f6\u95f4\u662f\u5426\u7b49\u4e8e`end`\u6765\u5224\u65ad\u662f\u5426\u83b7\u53d6\u5230\u4e86\u5168\u90e8\u6570\u636e\u3002 Args: code: \u8bc1\u5238\u4ee3\u7801 n: \u8bb0\u5f55\u6570 frame_type: \u5e27\u7c7b\u578b end: \u622a\u6b62\u65f6\u95f4,\u5982\u679c\u672a\u6307\u660e\uff0c\u5219\u53d6\u5f53\u524d\u65f6\u95f4 fq: \u662f\u5426\u5bf9\u8fd4\u56de\u8bb0\u5f55\u8fdb\u884c\u590d\u6743\u3002\u5982\u679c\u4e3a`True`\u7684\u8bdd\uff0c\u5219\u8fdb\u884c\u524d\u590d\u6743\u3002Defaults to True. unclosed: \u662f\u5426\u5305\u542b\u6700\u65b0\u672a\u6536\u76d8\u7684\u6570\u636e\uff1f Defaults to True. Returns: \u8fd4\u56dedtype\u4e3a`coretypes.bars_dtype`\u7684\u4e00\u7ef4numpy\u6570\u7ec4\u3002 \"\"\" now = datetime . datetime . now () try : cached = np . array ([], dtype = bars_dtype ) if frame_type in tf . day_level_frames : if end is None : end = now . date () elif type ( end ) == datetime . datetime : end = end . date () n0 = n if unclosed : cached = await cls . _get_cached_bars_n ( code , 1 , frame_type ) if cached . 
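A usage sketch for `get_bars` as documented above (omicron initialized, `FrameType` assumed importable from `coretypes`; the code is an arbitrary example):

```python
import asyncio
from coretypes import FrameType           # assumed import path
from omicron.models.stock import Stock

async def get_bars_demo():
    # last 30 daily bars, forward-adjusted, up to now
    bars = await Stock.get_bars("000001.XSHE", 30, FrameType.DAY, fq=True)
    if bars.size:
        print(bars["frame"][-1], bars["close"][-1])

# asyncio.run(get_bars_demo())
```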
size > 0 : # \u5982\u679c\u7f13\u5b58\u7684\u672a\u6536\u76d8\u65e5\u671f > end\uff0c\u5219\u8be5\u7f13\u5b58\u4e0d\u662f\u9700\u8981\u7684 if cached [ 0 ][ \"frame\" ] . item () . date () > end : cached = np . array ([], dtype = bars_dtype ) else : n0 = n - 1 else : end = end or now closed_frame = tf . floor ( end , frame_type ) # fetch one more bar, in case we should discard unclosed bar cached = await cls . _get_cached_bars_n ( code , n + 1 , frame_type , end ) if not unclosed : cached = cached [ cached [ \"frame\" ] <= closed_frame ] # n bars we need fetch from persisted db n0 = n - cached . size if n0 > 0 : if cached . size > 0 : end0 = cached [ 0 ][ \"frame\" ] . item () else : end0 = end bars = await cls . _get_persisted_bars_n ( code , frame_type , n0 , end0 ) merged = np . concatenate (( bars , cached )) bars = merged [ - n :] else : bars = cached [ - n :] if fq : bars = cls . qfq ( bars ) return bars except Exception as e : logger . exception ( e ) logger . warning ( \"failed to get bars for %s , %s , %s , %s \" , code , n , frame_type , end ) raise get_bars_in_range ( code , frame_type , start , end = None , fq = True , unclosed = True ) async classmethod \u00b6 \u83b7\u53d6\u6307\u5b9a\u8bc1\u5238\uff08 code \uff09\u5728[ start , end ]\u671f\u95f4\u5e27\u7c7b\u578b\u4e3a frame_type \u7684\u884c\u60c5\u6570\u636e\u3002 Parameters: Name Type Description Default code \u8bc1\u5238\u4ee3\u7801 required frame_type \u884c\u60c5\u6570\u636e\u7684\u5e27\u7c7b\u578b required start \u8d77\u59cb\u65f6\u95f4 required end \u7ed3\u675f\u65f6\u95f4,\u5982\u679c\u4e3aNone\uff0c\u5219\u8868\u660e\u53d6\u5230\u5f53\u524d\u65f6\u95f4\u3002 None fq \u662f\u5426\u5bf9\u884c\u60c5\u6570\u636e\u6267\u884c\u524d\u590d\u6743\u64cd\u4f5c True unclosed \u662f\u5426\u5305\u542b\u672a\u6536\u76d8\u7684\u6570\u636e True Source code in omicron/models/stock.py @classmethod async def get_bars_in_range ( cls , code : str , frame_type : FrameType , start : Frame , end : Frame = None , fq = True , unclosed = True , ) -> BarsArray : \"\"\"\u83b7\u53d6\u6307\u5b9a\u8bc1\u5238\uff08`code`\uff09\u5728[`start`, `end`]\u671f\u95f4\u5e27\u7c7b\u578b\u4e3a`frame_type`\u7684\u884c\u60c5\u6570\u636e\u3002 Args: code : \u8bc1\u5238\u4ee3\u7801 frame_type : \u884c\u60c5\u6570\u636e\u7684\u5e27\u7c7b\u578b start : \u8d77\u59cb\u65f6\u95f4 end : \u7ed3\u675f\u65f6\u95f4,\u5982\u679c\u4e3aNone\uff0c\u5219\u8868\u660e\u53d6\u5230\u5f53\u524d\u65f6\u95f4\u3002 fq : \u662f\u5426\u5bf9\u884c\u60c5\u6570\u636e\u6267\u884c\u524d\u590d\u6743\u64cd\u4f5c unclosed : \u662f\u5426\u5305\u542b\u672a\u6536\u76d8\u7684\u6570\u636e \"\"\" now = datetime . datetime . now () if frame_type in tf . day_level_frames : end = end or now . date () if unclosed and tf . day_shift ( end , 0 ) == now . date (): part2 = await cls . _get_cached_bars_n ( code , 1 , frame_type ) else : part2 = np . array ([], dtype = bars_dtype ) # get rest from persisted part1 = await cls . _get_persisted_bars_in_range ( code , frame_type , start , end ) bars = np . concatenate (( part1 , part2 )) else : end = end or now closed_end = tf . floor ( end , frame_type ) ff_min1 = tf . first_min_frame ( now , FrameType . MIN1 ) if tf . day_shift ( end , 0 ) < now . date () or end < ff_min1 : part1 = await cls . _get_persisted_bars_in_range ( code , frame_type , start , end ) part2 = np . array ([], dtype = bars_dtype ) elif start >= ff_min1 : # all in cache part1 = np . array ([], dtype = bars_dtype ) n = tf . 
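And the range form, `get_bars_in_range`, here for 30-minute bars (same assumptions; the window is arbitrary):

```python
import asyncio
import datetime
from coretypes import FrameType           # assumed import path
from omicron.models.stock import Stock

async def range_demo():
    start = datetime.datetime(2023, 9, 25, 10, 0)
    end = datetime.datetime(2023, 9, 28, 15, 0)
    bars = await Stock.get_bars_in_range("000001.XSHE", FrameType.MIN30, start, end)
    print(len(bars), bars["frame"][0] if len(bars) else None)

# asyncio.run(range_demo())
```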
count_frames ( start , closed_end , frame_type ) + 1 part2 = await cls . _get_cached_bars_n ( code , n , frame_type , end ) part2 = part2 [ part2 [ \"frame\" ] >= start ] else : # in both cache and persisted ff = tf . first_min_frame ( now , frame_type ) part1 = await cls . _get_persisted_bars_in_range ( code , frame_type , start , ff ) n = tf . count_frames ( ff , closed_end , frame_type ) + 1 part2 = await cls . _get_cached_bars_n ( code , n , frame_type , end ) if not unclosed : part2 = part2 [ part2 [ \"frame\" ] <= closed_end ] bars = np . concatenate (( part1 , part2 )) if fq : return cls . qfq ( bars ) else : return bars get_latest_price ( codes ) async classmethod \u00b6 \u83b7\u53d6\u591a\u652f\u80a1\u7968\u7684\u6700\u65b0\u4ef7\u683c\uff08\u4ea4\u6613\u65e5\u5f53\u5929\uff09\uff0c\u6682\u4e0d\u5305\u62ec\u6307\u6570 \u4ef7\u683c\u6570\u636e\u6bcf5\u79d2\u66f4\u65b0\u4e00\u6b21\uff0c\u63a5\u53d7\u591a\u53ea\u80a1\u7968\u67e5\u8be2\uff0c\u8fd4\u56de\u6700\u540e\u7f13\u5b58\u7684\u4ef7\u683c Parameters: Name Type Description Default codes Iterable[str] \u4ee3\u7801\u5217\u8868 required Returns: Type Description List[str] \u8fd4\u56de\u4e00\u4e2aList\uff0c\u4ef7\u683c\u662f\u5b57\u7b26\u5f62\u5f0f\u7684\u6d6e\u70b9\u6570\u3002 Source code in omicron/models/stock.py @classmethod async def get_latest_price ( cls , codes : Iterable [ str ]) -> List [ str ]: \"\"\"\u83b7\u53d6\u591a\u652f\u80a1\u7968\u7684\u6700\u65b0\u4ef7\u683c\uff08\u4ea4\u6613\u65e5\u5f53\u5929\uff09\uff0c\u6682\u4e0d\u5305\u62ec\u6307\u6570 \u4ef7\u683c\u6570\u636e\u6bcf5\u79d2\u66f4\u65b0\u4e00\u6b21\uff0c\u63a5\u53d7\u591a\u53ea\u80a1\u7968\u67e5\u8be2\uff0c\u8fd4\u56de\u6700\u540e\u7f13\u5b58\u7684\u4ef7\u683c Args: codes: \u4ee3\u7801\u5217\u8868 Returns: \u8fd4\u56de\u4e00\u4e2aList\uff0c\u4ef7\u683c\u662f\u5b57\u7b26\u5f62\u5f0f\u7684\u6d6e\u70b9\u6570\u3002 \"\"\" if not codes : return [] _raw_code_list = [] for code_str in codes : code , _ = code_str . split ( \".\" ) _raw_code_list . append ( code ) _converted_data = [] raw_data = await cache . feature . hmget ( TRADE_LATEST_PRICE , * _raw_code_list ) for _data in raw_data : if _data is None : _converted_data . append ( _data ) else : _converted_data . 
append ( float ( _data )) return _converted_data get_trade_price_limits ( code , begin , end ) async classmethod \u00b6 \u4eceinfluxdb\u548ccache\u4e2d\u83b7\u53d6\u4e2a\u80a1\u5728[begin, end]\u4e4b\u95f4\u7684\u6da8\u8dcc\u505c\u4ef7\u3002 \u6da8\u8dcc\u505c\u4ef7\u53ea\u6709\u65e5\u7ebf\u6570\u636e\u624d\u6709\uff0c\u56e0\u6b64\uff0cFrameType\u56fa\u5b9a\u4e3aFrameType.DAY\uff0c \u5f53\u5929\u7684\u6570\u636e\u5b58\u653e\u4e8eredis\uff0c\u5982\u679c\u67e5\u8be2\u65e5\u671f\u5305\u542b\u5f53\u5929\uff08\u4ea4\u6613\u65e5\uff09\uff0c\u4ececache\u4e2d\u8bfb\u53d6\u5e76\u8ffd\u52a0\u5230\u7ed3\u679c\u4e2d Parameters: Name Type Description Default code \u4e2a\u80a1\u4ee3\u7801 required begin \u5f00\u59cb\u65e5\u671f required end \u7ed3\u675f\u65e5\u671f required Returns: Type Description numpy.ndarray[Any, numpy.dtype[dtype([('frame', ' BarsArray : \"\"\"\u4eceinfluxdb\u548ccache\u4e2d\u83b7\u53d6\u4e2a\u80a1\u5728[begin, end]\u4e4b\u95f4\u7684\u6da8\u8dcc\u505c\u4ef7\u3002 \u6da8\u8dcc\u505c\u4ef7\u53ea\u6709\u65e5\u7ebf\u6570\u636e\u624d\u6709\uff0c\u56e0\u6b64\uff0cFrameType\u56fa\u5b9a\u4e3aFrameType.DAY\uff0c \u5f53\u5929\u7684\u6570\u636e\u5b58\u653e\u4e8eredis\uff0c\u5982\u679c\u67e5\u8be2\u65e5\u671f\u5305\u542b\u5f53\u5929\uff08\u4ea4\u6613\u65e5\uff09\uff0c\u4ececache\u4e2d\u8bfb\u53d6\u5e76\u8ffd\u52a0\u5230\u7ed3\u679c\u4e2d Args: code : \u4e2a\u80a1\u4ee3\u7801 begin : \u5f00\u59cb\u65e5\u671f end : \u7ed3\u675f\u65e5\u671f Returns: dtype\u4e3a[('frame', 'O'), ('high_limit', 'f4'), ('low_limit', 'f4')]\u7684numpy\u6570\u7ec4 \"\"\" cols = [ \"_time\" , \"high_limit\" , \"low_limit\" ] dtype = [( \"frame\" , \"O\" ), ( \"high_limit\" , \"f4\" ), ( \"low_limit\" , \"f4\" )] if isinstance ( begin , datetime . datetime ): begin = begin . date () # \u5f3a\u5236\u8f6c\u6362\u4e3adate if isinstance ( end , datetime . datetime ): end = end . date () # \u5f3a\u5236\u8f6c\u6362\u4e3adate data_in_cache = await cls . _get_price_limit_in_cache ( code , begin , end ) client = get_influx_client () measurement = cls . _measurement_name ( FrameType . DAY ) flux = ( Flux () . bucket ( client . _bucket ) . measurement ( measurement ) . range ( begin , end ) . tags ({ \"code\" : code }) . fields ( cols ) . sort ( \"_time\" ) ) ds = NumpyDeserializer ( dtype , use_cols = cols , converters = { \"_time\" : lambda x : ciso8601 . parse_datetime ( x ) . date ()}, # since we ask parse date in convertors, so we have to disable parse_date parse_date = None , ) result = await client . query ( flux , ds ) if data_in_cache : result = np . concatenate ([ result , data_in_cache ]) return result persist_bars ( frame_type , bars ) async classmethod \u00b6 \u5c06\u884c\u60c5\u6570\u636e\u6301\u4e45\u5316 \u5982\u679c bars \u7c7b\u578b\u4e3aDict,\u5219key\u4e3a code \uff0cvalue\u4e3a bars \u3002\u5982\u679c\u5176\u7c7b\u578b\u4e3aBarsArray\u6216\u8005pd.DataFrame\uff0c\u5219 bars \u5404\u5217\u5b57\u6bb5\u5e94\u8be5\u4e3a coretypes.bars_dtype + (\"code\", \"O\")\u6784\u6210\u3002 Parameters: Name Type Description Default frame_type FrameType the frame type of the bars required bars Union[Dict[str, numpy.ndarray[Any, numpy.dtype[dtype([('frame', ' BarsArray : \"\"\"\u5bf9\u884c\u60c5\u6570\u636e\u6267\u884c\u524d\u590d\u6743\u64cd\u4f5c\"\"\" # todo: \u8fd9\u91cc\u53ef\u4ee5\u4f18\u5316 if bars . 
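The forward-adjustment (`qfq`) arithmetic above, on a toy two-field array: each price/volume field is scaled by `factor / last_factor`, so the most recent bar keeps its quoted price. Only `close` is shown and the values are made up:

```python
import numpy as np

dtype = np.dtype([("close", "f8"), ("factor", "f8")])
bars = np.array([(10.0, 1.0), (10.5, 1.0), (5.4, 2.0)], dtype=dtype)  # factor changes before the last bar
last = bars[-1]["factor"]
adjusted = bars["close"] * (bars["factor"] / last)
print(adjusted)   # [5.   5.25 5.4 ] -- earlier closes rescaled into the latest factor regime
```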
size == 0 : return bars last = bars [ - 1 ][ \"factor\" ] for field in [ \"open\" , \"high\" , \"low\" , \"close\" , \"volume\" ]: bars [ field ] = bars [ field ] * ( bars [ \"factor\" ] / last ) return bars resample ( bars , from_frame , to_frame ) classmethod \u00b6 \u5c06\u539f\u6765\u4e3a from_frame \u7684\u884c\u60c5\u6570\u636e\u8f6c\u6362\u4e3a to_frame \u7684\u884c\u60c5\u6570\u636e \u5982\u679c to_frame \u4e3a\u65e5\u7ebf\u6216\u8005\u5206\u949f\u7ea7\u522b\u7ebf\uff0c\u5219 from_frame \u5fc5\u987b\u4e3a\u5206\u949f\u7ebf\uff1b\u5982\u679c to_frame \u4e3a\u5468\u4ee5\u4e0a\u7ea7\u522b\u7ebf\uff0c\u5219 from_frame \u5fc5\u987b\u4e3a\u65e5\u7ebf\u3002\u5176\u5b83\u7ea7\u522b\u4e4b\u95f4\u7684\u8f6c\u6362\u4e0d\u652f\u6301\u3002 \u5982\u679c from_frame \u4e3a1\u5206\u949f\u7ebf\uff0c\u5219\u5fc5\u987b\u4ece9\uff1a31\u8d77\u3002 Parameters: Name Type Description Default bars BarsArray \u884c\u60c5\u6570\u636e required from_frame FrameType \u8f6c\u6362\u524d\u7684FrameType required to_frame FrameType \u8f6c\u6362\u540e\u7684FrameType required Returns: Type Description BarsArray \u8f6c\u6362\u540e\u7684\u884c\u60c5\u6570\u636e Source code in omicron/models/stock.py @classmethod def resample ( cls , bars : BarsArray , from_frame : FrameType , to_frame : FrameType ) -> BarsArray : \"\"\"\u5c06\u539f\u6765\u4e3a`from_frame`\u7684\u884c\u60c5\u6570\u636e\u8f6c\u6362\u4e3a`to_frame`\u7684\u884c\u60c5\u6570\u636e \u5982\u679c`to_frame`\u4e3a\u65e5\u7ebf\u6216\u8005\u5206\u949f\u7ea7\u522b\u7ebf\uff0c\u5219`from_frame`\u5fc5\u987b\u4e3a\u5206\u949f\u7ebf\uff1b\u5982\u679c`to_frame`\u4e3a\u5468\u4ee5\u4e0a\u7ea7\u522b\u7ebf\uff0c\u5219`from_frame`\u5fc5\u987b\u4e3a\u65e5\u7ebf\u3002\u5176\u5b83\u7ea7\u522b\u4e4b\u95f4\u7684\u8f6c\u6362\u4e0d\u652f\u6301\u3002 \u5982\u679c`from_frame`\u4e3a1\u5206\u949f\u7ebf\uff0c\u5219\u5fc5\u987b\u4ece9\uff1a31\u8d77\u3002 Args: bars (BarsArray): \u884c\u60c5\u6570\u636e from_frame (FrameType): \u8f6c\u6362\u524d\u7684FrameType to_frame (FrameType): \u8f6c\u6362\u540e\u7684FrameType Returns: BarsArray: \u8f6c\u6362\u540e\u7684\u884c\u60c5\u6570\u636e \"\"\" if from_frame == FrameType . MIN1 : return cls . _resample_from_min1 ( bars , to_frame ) elif from_frame == FrameType . DAY : # pragma: no cover return cls . _resample_from_day ( bars , to_frame ) else : # pragma: no cover raise TypeError ( f \"unsupported from_frame: { from_frame } \" ) reset_cache () async classmethod \u00b6 \u6e05\u9664\u7f13\u5b58\u7684\u884c\u60c5\u6570\u636e Source code in omicron/models/stock.py @classmethod async def reset_cache ( cls ): \"\"\"\u6e05\u9664\u7f13\u5b58\u7684\u884c\u60c5\u6570\u636e\"\"\" try : for ft in itertools . chain ( tf . minute_level_frames , tf . day_level_frames ): keys = await cache . security . keys ( f \"bars: { ft . value } :*\" ) if keys : await cache . security . delete ( * keys ) finally : cls . 
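A usage sketch for the public `resample` entry point above; `min1_bars` is a 1-minute BarsArray you already hold (it must start at 9:31, per the docstring), and `FrameType` is assumed importable from `coretypes`:

```python
from coretypes import FrameType           # assumed import path
from omicron.models.stock import Stock

def resample_demo(min1_bars):
    # 1-minute bars -> 5-minute bars; raises ValueError if the input does not start at 9:31
    return Stock.resample(min1_bars, FrameType.MIN1, FrameType.MIN5)
```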
_is_cache_empty = True save_trade_price_limits ( price_limits , to_cache ) async classmethod \u00b6 \u4fdd\u5b58\u6da8\u8dcc\u505c\u4ef7 Parameters: Name Type Description Default price_limits numpy.ndarray[Any, numpy.dtype[dtype([('frame', 'O'), ('code', 'O'), ('high_limit', ' Tuple [ List [ bool ]]: \"\"\"\u83b7\u53d6\u4e2a\u80a1\u5728[start, end]\u4e4b\u95f4\u7684\u6da8\u8dcc\u505c\u6807\u5fd7 !!!Note \u672c\u51fd\u6570\u8fd4\u56de\u7684\u5e8f\u5217\u5728\u80a1\u7968\u6709\u505c\u724c\u7684\u60c5\u51b5\u4e0b\uff0c\u5c06\u4e0d\u80fd\u4e0e[start, end]\u4e00\u4e00\u5bf9\u5e94\u3002 Args: code: \u4e2a\u80a1\u4ee3\u7801 start: \u5f00\u59cb\u65e5\u671f end: \u7ed3\u675f\u65e5\u671f Returns: \u6da8\u8dcc\u505c\u6807\u5fd7\u5217\u8868(buy, sell) \"\"\" cols = [ \"_time\" , \"close\" , \"high_limit\" , \"low_limit\" ] client = get_influx_client () measurement = cls . _measurement_name ( FrameType . DAY ) flux = ( Flux () . bucket ( client . _bucket ) . measurement ( measurement ) . range ( start , end ) . tags ({ \"code\" : code }) . fields ( cols ) . sort ( \"_time\" ) ) dtype = [ ( \"frame\" , \"O\" ), ( \"close\" , \"f4\" ), ( \"high_limit\" , \"f4\" ), ( \"low_limit\" , \"f4\" ), ] ds = NumpyDeserializer ( dtype , use_cols = [ \"_time\" , \"close\" , \"high_limit\" , \"low_limit\" ], converters = { \"_time\" : lambda x : ciso8601 . parse_datetime ( x ) . date ()}, # since we ask parse date in convertors, so we have to disable parse_date parse_date = None , ) result = await client . query ( flux , ds ) if result . size == 0 : return np . array ([], dtype = dtype ) return ( array_price_equal ( result [ \"close\" ], result [ \"high_limit\" ]), array_price_equal ( result [ \"close\" ], result [ \"low_limit\" ]), ) trade_price_limit_flags_ex ( code , start , end ) async classmethod \u00b6 \u83b7\u53d6\u80a1\u7968 code \u5728 [start, end] \u533a\u95f4\u7684\u6da8\u8dcc\u505c\u6807\u5fd7 Note \u5982\u679cend\u4e3a\u5f53\u5929\uff0c\u6ce8\u610f\u5728\u672a\u6536\u76d8\u4e4b\u524d\uff0c\u8fd9\u4e2a\u6da8\u8dcc\u505c\u6807\u5fd7\u90fd\u662f\u4e0d\u7a33\u5b9a\u7684 Parameters: Name Type Description Default code str \u80a1\u7968\u4ee3\u7801 required start date \u8d77\u59cb\u65e5\u671f required end date \u7ed3\u675f\u65e5\u671f required Returns: Type Description Dict[datetime.date, Tuple[bool, bool]] \u4ee5\u65e5\u671f\u4e3akey\uff0c\uff08\u6da8\u505c\uff0c\u8dcc\u505c\uff09\u4e3a\u503c\u7684dict Source code in omicron/models/stock.py @classmethod async def trade_price_limit_flags_ex ( cls , code : str , start : datetime . date , end : datetime . date ) -> Dict [ datetime . date , Tuple [ bool , bool ]]: \"\"\"\u83b7\u53d6\u80a1\u7968`code`\u5728`[start, end]`\u533a\u95f4\u7684\u6da8\u8dcc\u505c\u6807\u5fd7 !!!Note \u5982\u679cend\u4e3a\u5f53\u5929\uff0c\u6ce8\u610f\u5728\u672a\u6536\u76d8\u4e4b\u524d\uff0c\u8fd9\u4e2a\u6da8\u8dcc\u505c\u6807\u5fd7\u90fd\u662f\u4e0d\u7a33\u5b9a\u7684 Args: code: \u80a1\u7968\u4ee3\u7801 start: \u8d77\u59cb\u65e5\u671f end: \u7ed3\u675f\u65e5\u671f Returns: \u4ee5\u65e5\u671f\u4e3akey\uff0c\uff08\u6da8\u505c\uff0c\u8dcc\u505c\uff09\u4e3a\u503c\u7684dict \"\"\" limit_prices = await cls . get_trade_price_limits ( code , start , end ) bars = await Stock . get_bars_in_range ( code , FrameType . DAY , start = start , end = end , fq = False ) close = bars [ \"close\" ] results = {} # aligned = True for i in range ( len ( bars )): if bars [ i ][ \"frame\" ] . item () . date () != limit_prices [ i ][ \"frame\" ]: # aligned = False logger . 
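A usage sketch for `trade_price_limit_flags_ex` as documented above, assuming omicron is initialized; the returned dict maps each trading day to a (hit high limit, hit low limit) pair:

```python
import asyncio
import datetime
from omicron.models.stock import Stock

async def limit_flags_demo():
    flags = await Stock.trade_price_limit_flags_ex(
        "000001.XSHE", datetime.date(2023, 9, 1), datetime.date(2023, 9, 28)
    )
    for day, (hit_high, hit_low) in flags.items():
        print(day, hit_high, hit_low)

# asyncio.run(limit_flags_demo())
```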
warning ( \"\u6570\u636e\u540c\u6b65\u9519\u8bef\uff0c\u6da8\u8dcc\u505c\u4ef7\u683c\u4e0e\u6536\u76d8\u4ef7\u65f6\u95f4\u4e0d\u4e00\u81f4: %s , %s \" , code , bars [ i ][ \"frame\" ]) break results [ limit_prices [ i ][ \"frame\" ]] = ( price_equal ( limit_prices [ i ][ \"high_limit\" ], close [ i ]), price_equal ( limit_prices [ i ][ \"low_limit\" ], close [ i ]), ) # if not aligned: # bars = bars[i:] # limit_prices = limit_prices[i:] # for frame in bars[\"frame\"]: # frame = frame.item().date() # close = bars[bars[\"frame\"].item().date() == frame][\"close\"].item() # high = limit_prices[limit_prices[\"frame\"] == frame][\"high_limit\"].item() # low = limit_prices[limit_prices[\"frame\"] == frame][\"low_limit\"].item() # results[frame] = ( # price_equal(high, close), # price_equal(low, close) # ) return results","title":"stock"},{"location":"api/stock/#omicron.models.stock.Stock","text":"Stock\u5bf9\u8c61\u7528\u4e8e\u5f52\u96c6\u67d0\u652f\u8bc1\u5238\uff08\u80a1\u7968\u548c\u6307\u6570\uff0c\u4e0d\u5305\u62ec\u5176\u5b83\u6295\u8d44\u54c1\u79cd\uff09\u7684\u76f8\u5173\u4fe1\u606f\uff0c\u6bd4\u5982\u884c\u60c5\u6570\u636e\uff08OHLC\u7b49\uff09\u3001\u5e02\u503c\u6570\u636e\u3001\u6240\u5c5e\u6982\u5ff5\u5206\u7c7b\u7b49\u3002 Source code in omicron/models/stock.py class Stock ( Security ): \"\"\" Stock\u5bf9\u8c61\u7528\u4e8e\u5f52\u96c6\u67d0\u652f\u8bc1\u5238\uff08\u80a1\u7968\u548c\u6307\u6570\uff0c\u4e0d\u5305\u62ec\u5176\u5b83\u6295\u8d44\u54c1\u79cd\uff09\u7684\u76f8\u5173\u4fe1\u606f\uff0c\u6bd4\u5982\u884c\u60c5\u6570\u636e\uff08OHLC\u7b49\uff09\u3001\u5e02\u503c\u6570\u636e\u3001\u6240\u5c5e\u6982\u5ff5\u5206\u7c7b\u7b49\u3002 \"\"\" _is_cache_empty = True def __init__ ( self , code : str ): self . _code = code self . _stock = self . get_stock ( code ) assert self . _stock , \"\u7cfb\u7edf\u4e2d\u4e0d\u5b58\u5728\u8be5code\" ( _ , self . _display_name , self . _name , ipo , end , _type ) = self . _stock self . _start_date = convert_nptime_to_datetime ( ipo ) . date () self . _end_date = convert_nptime_to_datetime ( end ) . date () self . _type = SecurityType ( _type ) @classmethod def choose_listed ( cls , dt : datetime . date , types : List [ str ] = [ \"stock\" , \"index\" ]): cond = np . array ([ False ] * len ( cls . _stocks )) dt = datetime . datetime . combine ( dt , datetime . time ()) for type_ in types : cond |= cls . _stocks [ \"type\" ] == type_ result = cls . _stocks [ cond ] result = result [ result [ \"end\" ] > dt ] result = result [ result [ \"ipo\" ] <= dt ] # result = np.array(result, dtype=cls.stock_info_dtype) return result [ \"code\" ] . tolist () @classmethod def fuzzy_match ( cls , query : str ) -> Dict [ str , Tuple ]: \"\"\"\u5bf9\u80a1\u7968/\u6307\u6570\u8fdb\u884c\u6a21\u7cca\u5339\u914d\u67e5\u627e query\u53ef\u4ee5\u662f\u80a1\u7968/\u6307\u6570\u4ee3\u7801\uff0c\u4e5f\u53ef\u4ee5\u662f\u5b57\u6bcd\uff08\u6309name\u67e5\u627e\uff09\uff0c\u4e5f\u53ef\u4ee5\u662f\u6c49\u5b57\uff08\u6309\u663e\u793a\u540d\u67e5\u627e\uff09 Args: query (str): \u67e5\u8be2\u5b57\u7b26\u4e32 Returns: Dict[str, Tuple]: \u67e5\u8be2\u7ed3\u679c\uff0c\u5176\u4e2dTuple\u4e3a(code, display_name, name, start, end, type) \"\"\" query = query . upper () if re . match ( r \"\\d+\" , query ): return { sec [ \"code\" ]: sec . tolist () for sec in cls . _stocks if sec [ \"code\" ] . startswith ( query ) } elif re . match ( r \"[A-Z]+\" , query ): return { sec [ \"code\" ]: sec . tolist () for sec in cls . _stocks if sec [ \"name\" ] . 
startswith ( query ) } else : return { sec [ \"code\" ]: sec . tolist () for sec in cls . _stocks if sec [ \"alias\" ] . find ( query ) != - 1 } def __str__ ( self ): return f \" { self . display_name } [ { self . code } ]\" @property def ipo_date ( self ) -> datetime . date : return self . _start_date @property def display_name ( self ) -> str : return self . _display_name @property def name ( self ) -> str : return self . _name @property def end_date ( self ) -> datetime . date : return self . _end_date @property def code ( self ) -> str : return self . _code @property def sim_code ( self ) -> str : return re . sub ( r \"\\.XSH[EG]\" , \"\" , self . code ) @property def security_type ( self ) -> SecurityType : \"\"\"\u8fd4\u56de\u8bc1\u5238\u7c7b\u578b Returns: SecurityType: [description] \"\"\" return self . _type @staticmethod def simplify_code ( code ) -> str : return re . sub ( r \"\\.XSH[EG]\" , \"\" , code ) @staticmethod def format_code ( code ) -> str : \"\"\"\u65b0\u4e09\u677f\u548c\u5317\u4ea4\u6240\u7684\u80a1\u7968, \u6682\u4e0d\u652f\u6301, \u9ed8\u8ba4\u8fd4\u56deNone \u4e0a\u8bc1A\u80a1: 600\u3001601\u3001603\u3001605 \u6df1\u8bc1A\u80a1: 000\u3001001 \u4e2d\u5c0f\u677f: 002\u3001003 \u521b\u4e1a\u677f: 300/301 \u79d1\u521b\u677f: 688 \u65b0\u4e09\u677f: 82\u300183\u300187\u300188\u3001430\u3001420\u3001400 \u5317\u4ea4\u6240: 43\u300183\u300187\u300188 \"\"\" if not code or len ( code ) != 6 : return None prefix = code [ 0 ] if prefix in ( \"0\" , \"3\" ): return f \" { code } .XSHE\" elif prefix == \"6\" : return f \" { code } .XSHG\" else : return None def days_since_ipo ( self ) -> int : \"\"\"\u83b7\u53d6\u4e0a\u5e02\u4ee5\u6765\u7ecf\u8fc7\u4e86\u591a\u5c11\u4e2a\u4ea4\u6613\u65e5 \u7531\u4e8e\u53d7\u4ea4\u6613\u65e5\u5386\u9650\u5236\uff082005\u5e741\u67084\u65e5\u4e4b\u524d\u7684\u4ea4\u6613\u65e5\u5386\u6ca1\u6709\uff09\uff0c\u5bf9\u4e8e\u5728\u4e4b\u524d\u4e0a\u5e02\u7684\u54c1\u79cd\uff0c\u90fd\u8fd4\u56de\u4ece2005\u5e741\u67084\u65e5\u8d77\u7684\u65e5\u671f\u3002 Returns: int: [description] \"\"\" epoch_start = arrow . get ( \"2005-01-04\" ) . date () ipo_day = self . ipo_date if self . ipo_date > epoch_start else epoch_start return tf . count_day_frames ( ipo_day , arrow . now () . date ()) @staticmethod def qfq ( bars : BarsArray ) -> BarsArray : \"\"\"\u5bf9\u884c\u60c5\u6570\u636e\u6267\u884c\u524d\u590d\u6743\u64cd\u4f5c\"\"\" # todo: \u8fd9\u91cc\u53ef\u4ee5\u4f18\u5316 if bars . 
size == 0 : return bars last = bars [ - 1 ][ \"factor\" ] for field in [ \"open\" , \"high\" , \"low\" , \"close\" , \"volume\" ]: bars [ field ] = bars [ field ] * ( bars [ \"factor\" ] / last ) return bars @classmethod async def batch_get_min_level_bars_in_range ( cls , codes : List [ str ], frame_type : FrameType , start : Frame , end : Frame , fq : bool = True , ) -> Generator [ Dict [ str , BarsArray ], None , None ]: \"\"\"\u83b7\u53d6\u591a\u652f\u80a1\u7968\uff08\u6307\u6570\uff09\u5728[start, end)\u65f6\u95f4\u6bb5\u5185\u7684\u884c\u60c5\u6570\u636e \u5982\u679c\u8981\u83b7\u53d6\u7684\u884c\u60c5\u6570\u636e\u662f\u5206\u949f\u7ea7\u522b\uff08\u53731m, 5m, 15m, 30m\u548c60m)\uff0c\u4f7f\u7528\u672c\u63a5\u53e3\u3002 \u505c\u724c\u6570\u636e\u5904\u7406\u8bf7\u89c1[get_bars][omicron.models.stock.Stock.get_bars]\u3002 \u672c\u51fd\u6570\u8fd4\u56de\u4e00\u4e2a\u8fed\u4ee3\u5668\uff0c\u4f7f\u7528\u65b9\u6cd5\u793a\u4f8b\uff1a ``` async for code, bars in Stock.batch_get_min_level_bars_in_range(...): print(code, bars) ``` \u5982\u679c`end`\u4e0d\u5728`frame_type`\u6240\u5c5e\u7684\u8fb9\u754c\u70b9\u4e0a\uff0c\u90a3\u4e48\uff0c\u5982\u679c`end`\u5927\u4e8e\u7b49\u4e8e\u5f53\u524d\u7f13\u5b58\u672a\u6536\u76d8\u6570\u636e\u65f6\u95f4\uff0c\u5219\u5c06\u5305\u542b\u672a\u6536\u76d8\u6570\u636e\uff1b\u5426\u5219\uff0c\u8fd4\u56de\u7684\u8bb0\u5f55\u5c06\u622a\u6b62\u5230`tf.floor(end, frame_type)`\u3002 Args: codes: \u80a1\u7968/\u6307\u6570\u4ee3\u7801\u5217\u8868 frame_type: \u5e27\u7c7b\u578b start: \u8d77\u59cb\u65f6\u95f4 end: \u7ed3\u675f\u65f6\u95f4\u3002\u5982\u679c\u672a\u6307\u660e\uff0c\u5219\u53d6\u5f53\u524d\u65f6\u95f4\u3002 fq: \u662f\u5426\u8fdb\u884c\u590d\u6743\uff0c\u5982\u679c\u662f\uff0c\u5219\u8fdb\u884c\u524d\u590d\u6743\u3002Defaults to True. Returns: Generator[Dict[str, BarsArray], None, None]: \u8fed\u4ee3\u5668\uff0c\u6bcf\u6b21\u8fd4\u56de\u4e00\u4e2a\u5b57\u5178\uff0c\u5176\u4e2dkey\u4e3a\u4ee3\u7801\uff0cvalue\u4e3a\u884c\u60c5\u6570\u636e \"\"\" closed_end = tf . floor ( end , frame_type ) n = tf . count_frames ( start , closed_end , frame_type ) max_query_size = min ( cfg . influxdb . max_query_size , INFLUXDB_MAX_QUERY_SIZE ) batch_size = max ( 1 , max_query_size // n ) ff = tf . first_min_frame ( datetime . datetime . now (), frame_type ) for i in range ( 0 , len ( codes ), batch_size ): batch_codes = codes [ i : i + batch_size ] if end < ff : part1 = await cls . _batch_get_persisted_bars_in_range ( batch_codes , frame_type , start , end ) part2 = pd . DataFrame ([], columns = bars_dtype_with_code . names ) elif start >= ff : part1 = pd . DataFrame ([], columns = bars_dtype_with_code . names ) n = tf . count_frames ( start , closed_end , frame_type ) + 1 cached = await cls . _batch_get_cached_bars_n ( frame_type , n , end , batch_codes ) cached = cached [ cached [ \"frame\" ] >= start ] part2 = pd . DataFrame ( cached , columns = bars_dtype_with_code . names ) else : part1 = await cls . _batch_get_persisted_bars_in_range ( batch_codes , frame_type , start , ff ) n = tf . count_frames ( start , closed_end , frame_type ) + 1 cached = await cls . _batch_get_cached_bars_n ( frame_type , n , end , batch_codes ) part2 = pd . DataFrame ( cached , columns = bars_dtype_with_code . names ) df = pd . concat ([ part1 , part2 ]) for code in batch_codes : filtered = df [ df [ \"code\" ] == code ][ bars_cols ] bars = filtered . to_records ( index = False ) . astype ( bars_dtype ) if fq : bars = cls . 
qfq ( bars ) yield code , bars @classmethod async def batch_get_day_level_bars_in_range ( cls , codes : List [ str ], frame_type : FrameType , start : Frame , end : Frame , fq : bool = True , ) -> Generator [ Dict [ str , BarsArray ], None , None ]: \"\"\"\u83b7\u53d6\u591a\u652f\u80a1\u7968\uff08\u6307\u6570\uff09\u5728[start, end)\u65f6\u95f4\u6bb5\u5185\u7684\u884c\u60c5\u6570\u636e \u5982\u679c\u8981\u83b7\u53d6\u7684\u884c\u60c5\u6570\u636e\u662f\u65e5\u7ebf\u7ea7\u522b\uff08\u53731d, 1w, 1M)\uff0c\u4f7f\u7528\u672c\u63a5\u53e3\u3002 \u505c\u724c\u6570\u636e\u5904\u7406\u8bf7\u89c1[get_bars][omicron.models.stock.Stock.get_bars]\u3002 \u672c\u51fd\u6570\u8fd4\u56de\u4e00\u4e2a\u8fed\u4ee3\u5668\uff0c\u4f7f\u7528\u65b9\u6cd5\u793a\u4f8b\uff1a ``` async for code, bars in Stock.batch_get_day_level_bars_in_range(...): print(code, bars) ``` \u5982\u679c`end`\u4e0d\u5728`frame_type`\u6240\u5c5e\u7684\u8fb9\u754c\u70b9\u4e0a\uff0c\u90a3\u4e48\uff0c\u5982\u679c`end`\u5927\u4e8e\u7b49\u4e8e\u5f53\u524d\u7f13\u5b58\u672a\u6536\u76d8\u6570\u636e\u65f6\u95f4\uff0c\u5219\u5c06\u5305\u542b\u672a\u6536\u76d8\u6570\u636e\uff1b\u5426\u5219\uff0c\u8fd4\u56de\u7684\u8bb0\u5f55\u5c06\u622a\u6b62\u5230`tf.floor(end, frame_type)`\u3002 Args: codes: \u4ee3\u7801\u5217\u8868 frame_type: \u5e27\u7c7b\u578b start: \u8d77\u59cb\u65f6\u95f4 end: \u7ed3\u675f\u65f6\u95f4 fq: \u662f\u5426\u8fdb\u884c\u590d\u6743\uff0c\u5982\u679c\u662f\uff0c\u5219\u8fdb\u884c\u524d\u590d\u6743\u3002Defaults to True. Returns: Generator[Dict[str, BarsArray], None, None]: \u8fed\u4ee3\u5668\uff0c\u6bcf\u6b21\u8fd4\u56de\u4e00\u4e2a\u5b57\u5178\uff0c\u5176\u4e2dkey\u4e3a\u4ee3\u7801\uff0cvalue\u4e3a\u884c\u60c5\u6570\u636e \"\"\" today = datetime . datetime . now () . date () # \u65e5\u7ebf\uff0cend\u4e0d\u7b49\u4e8e\u6700\u540e\u4ea4\u6613\u65e5\uff0c\u6b64\u65f6\u5df2\u65e0\u7f13\u5b58 if frame_type == FrameType . DAY and end == tf . floor ( today , frame_type ): from_cache = True elif frame_type != FrameType . DAY and start > tf . floor ( today , frame_type ): from_cache = True else : from_cache = False n = tf . count_frames ( start , end , frame_type ) max_query_size = min ( cfg . influxdb . max_query_size , INFLUXDB_MAX_QUERY_SIZE ) batch_size = max ( max_query_size // n , 1 ) for i in range ( 0 , len ( codes ), batch_size ): batch_codes = codes [ i : i + batch_size ] persisted = await cls . _batch_get_persisted_bars_in_range ( batch_codes , frame_type , start , end ) if from_cache : cached = await cls . _batch_get_cached_bars_n ( frame_type , 1 , end , batch_codes ) cached = pd . DataFrame ( cached , columns = bars_dtype_with_code . names ) df = pd . concat ([ persisted , cached ]) else : df = persisted for code in batch_codes : filtered = df [ df [ \"code\" ] == code ][ bars_cols ] bars = filtered . to_records ( index = False ) . astype ( bars_dtype ) if fq : bars = cls . 
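Building on the `async for` pattern given in the docstring above, a more concrete call of the day-level batch interface might look like the sketch below. The codes and dates are made up, the `FrameType` import path is an assumption, and an initialized omicron/Omega environment is assumed.

```python
# Hypothetical usage sketch of batch_get_day_level_bars_in_range.
# Assumes omicron has been initialized and the Omega service is running.
import asyncio
import datetime
from coretypes import FrameType  # assumed import path
from omicron.models.stock import Stock

async def main():
    codes = ["000001.XSHE", "600000.XSHG"]  # hypothetical codes
    start = datetime.date(2023, 1, 3)
    end = datetime.date(2023, 1, 31)
    async for code, bars in Stock.batch_get_day_level_bars_in_range(
        codes, FrameType.DAY, start, end, fq=True
    ):
        print(code, len(bars), bars["close"][-1])

asyncio.run(main())
```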
qfq ( bars ) yield code , bars @classmethod async def get_bars_in_range ( cls , code : str , frame_type : FrameType , start : Frame , end : Frame = None , fq = True , unclosed = True , ) -> BarsArray : \"\"\"\u83b7\u53d6\u6307\u5b9a\u8bc1\u5238\uff08`code`\uff09\u5728[`start`, `end`]\u671f\u95f4\u5e27\u7c7b\u578b\u4e3a`frame_type`\u7684\u884c\u60c5\u6570\u636e\u3002 Args: code : \u8bc1\u5238\u4ee3\u7801 frame_type : \u884c\u60c5\u6570\u636e\u7684\u5e27\u7c7b\u578b start : \u8d77\u59cb\u65f6\u95f4 end : \u7ed3\u675f\u65f6\u95f4,\u5982\u679c\u4e3aNone\uff0c\u5219\u8868\u660e\u53d6\u5230\u5f53\u524d\u65f6\u95f4\u3002 fq : \u662f\u5426\u5bf9\u884c\u60c5\u6570\u636e\u6267\u884c\u524d\u590d\u6743\u64cd\u4f5c unclosed : \u662f\u5426\u5305\u542b\u672a\u6536\u76d8\u7684\u6570\u636e \"\"\" now = datetime . datetime . now () if frame_type in tf . day_level_frames : end = end or now . date () if unclosed and tf . day_shift ( end , 0 ) == now . date (): part2 = await cls . _get_cached_bars_n ( code , 1 , frame_type ) else : part2 = np . array ([], dtype = bars_dtype ) # get rest from persisted part1 = await cls . _get_persisted_bars_in_range ( code , frame_type , start , end ) bars = np . concatenate (( part1 , part2 )) else : end = end or now closed_end = tf . floor ( end , frame_type ) ff_min1 = tf . first_min_frame ( now , FrameType . MIN1 ) if tf . day_shift ( end , 0 ) < now . date () or end < ff_min1 : part1 = await cls . _get_persisted_bars_in_range ( code , frame_type , start , end ) part2 = np . array ([], dtype = bars_dtype ) elif start >= ff_min1 : # all in cache part1 = np . array ([], dtype = bars_dtype ) n = tf . count_frames ( start , closed_end , frame_type ) + 1 part2 = await cls . _get_cached_bars_n ( code , n , frame_type , end ) part2 = part2 [ part2 [ \"frame\" ] >= start ] else : # in both cache and persisted ff = tf . first_min_frame ( now , frame_type ) part1 = await cls . _get_persisted_bars_in_range ( code , frame_type , start , ff ) n = tf . count_frames ( ff , closed_end , frame_type ) + 1 part2 = await cls . _get_cached_bars_n ( code , n , frame_type , end ) if not unclosed : part2 = part2 [ part2 [ \"frame\" ] <= closed_end ] bars = np . concatenate (( part1 , part2 )) if fq : return cls . 
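For the single-security range query documented above, a hypothetical call might look like this; the code, dates and the `FrameType` import path are assumptions, and an initialized omicron/Omega environment is required.

```python
# Hypothetical sketch of get_bars_in_range for a 30-minute frame type.
import asyncio
import datetime
from coretypes import FrameType  # assumed import path
from omicron.models.stock import Stock

async def main():
    start = datetime.datetime(2023, 1, 4, 9, 31)
    end = datetime.datetime(2023, 1, 4, 11, 30)
    bars = await Stock.get_bars_in_range(
        "000001.XSHE", FrameType.MIN30, start, end, fq=True, unclosed=False
    )
    # bars is a one-dimensional numpy array of dtype coretypes.bars_dtype
    print(bars["frame"], bars["close"])

asyncio.run(main())
```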
qfq ( bars ) else : return bars @classmethod async def get_bars ( cls , code : str , n : int , frame_type : FrameType , end : Frame = None , fq = True , unclosed = True , ) -> BarsArray : \"\"\"\u83b7\u53d6\u5230`end`\u4e3a\u6b62\u7684`n`\u4e2a\u884c\u60c5\u6570\u636e\u3002 \u8fd4\u56de\u7684\u6570\u636e\u662f\u6309\u7167\u65f6\u95f4\u987a\u5e8f\u9012\u589e\u6392\u5e8f\u7684\u3002\u5728\u9047\u5230\u505c\u724c\u7684\u60c5\u51b5\u65f6\uff0c\u8be5\u65f6\u6bb5\u6570\u636e\u5c06\u88ab\u8df3\u8fc7\uff0c\u56e0\u6b64\u8fd4\u56de\u7684\u8bb0\u5f55\u53ef\u80fd\u4e0d\u662f\u4ea4\u6613\u65e5\u8fde\u7eed\u7684\uff0c\u5e76\u4e14\u53ef\u80fd\u4e0d\u8db3`n`\u4e2a\u3002 \u5982\u679c\u7cfb\u7edf\u5f53\u524d\u6ca1\u6709\u5230\u6307\u5b9a\u65f6\u95f4`end`\u7684\u6570\u636e\uff0c\u5c06\u5c3d\u6700\u5927\u52aa\u529b\u8fd4\u56de\u6570\u636e\u3002\u8c03\u7528\u8005\u53ef\u4ee5\u901a\u8fc7\u5224\u65ad\u6700\u540e\u4e00\u6761\u6570\u636e\u7684\u65f6\u95f4\u662f\u5426\u7b49\u4e8e`end`\u6765\u5224\u65ad\u662f\u5426\u83b7\u53d6\u5230\u4e86\u5168\u90e8\u6570\u636e\u3002 Args: code: \u8bc1\u5238\u4ee3\u7801 n: \u8bb0\u5f55\u6570 frame_type: \u5e27\u7c7b\u578b end: \u622a\u6b62\u65f6\u95f4,\u5982\u679c\u672a\u6307\u660e\uff0c\u5219\u53d6\u5f53\u524d\u65f6\u95f4 fq: \u662f\u5426\u5bf9\u8fd4\u56de\u8bb0\u5f55\u8fdb\u884c\u590d\u6743\u3002\u5982\u679c\u4e3a`True`\u7684\u8bdd\uff0c\u5219\u8fdb\u884c\u524d\u590d\u6743\u3002Defaults to True. unclosed: \u662f\u5426\u5305\u542b\u6700\u65b0\u672a\u6536\u76d8\u7684\u6570\u636e\uff1f Defaults to True. Returns: \u8fd4\u56dedtype\u4e3a`coretypes.bars_dtype`\u7684\u4e00\u7ef4numpy\u6570\u7ec4\u3002 \"\"\" now = datetime . datetime . now () try : cached = np . array ([], dtype = bars_dtype ) if frame_type in tf . day_level_frames : if end is None : end = now . date () elif type ( end ) == datetime . datetime : end = end . date () n0 = n if unclosed : cached = await cls . _get_cached_bars_n ( code , 1 , frame_type ) if cached . size > 0 : # \u5982\u679c\u7f13\u5b58\u7684\u672a\u6536\u76d8\u65e5\u671f > end\uff0c\u5219\u8be5\u7f13\u5b58\u4e0d\u662f\u9700\u8981\u7684 if cached [ 0 ][ \"frame\" ] . item () . date () > end : cached = np . array ([], dtype = bars_dtype ) else : n0 = n - 1 else : end = end or now closed_frame = tf . floor ( end , frame_type ) # fetch one more bar, in case we should discard unclosed bar cached = await cls . _get_cached_bars_n ( code , n + 1 , frame_type , end ) if not unclosed : cached = cached [ cached [ \"frame\" ] <= closed_frame ] # n bars we need fetch from persisted db n0 = n - cached . size if n0 > 0 : if cached . size > 0 : end0 = cached [ 0 ][ \"frame\" ] . item () else : end0 = end bars = await cls . _get_persisted_bars_n ( code , frame_type , n0 , end0 ) merged = np . concatenate (( bars , cached )) bars = merged [ - n :] else : bars = cached [ - n :] if fq : bars = cls . qfq ( bars ) return bars except Exception as e : logger . exception ( e ) logger . 
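The `get_bars` docstring above suggests checking whether the last returned frame equals `end` to know whether the requested window was fully available. A hypothetical sketch of that pattern (code and date made up, initialized omicron/Omega environment assumed):

```python
# Hypothetical sketch: fetch n bars up to `end` and verify completeness,
# as the get_bars docstring recommends.
import asyncio
import datetime
from coretypes import FrameType  # assumed import path
from omicron.models.stock import Stock

async def main():
    end = datetime.date(2023, 2, 10)
    bars = await Stock.get_bars("600000.XSHG", 20, FrameType.DAY, end=end)
    if len(bars) and bars[-1]["frame"].item().date() == end:
        print("got data through", end)
    else:
        print("data not yet synced up to", end, "- got", len(bars), "bars")

asyncio.run(main())
```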
warning ( \"failed to get bars for %s , %s , %s , %s \" , code , n , frame_type , end ) raise @classmethod async def _get_persisted_bars_in_range ( cls , code : str , frame_type : FrameType , start : Frame , end : Frame = None ) -> BarsArray : \"\"\"\u4ece\u6301\u4e45\u5316\u6570\u636e\u5e93\u4e2d\u83b7\u53d6\u4ecb\u4e8e[`start`, `end`]\u95f4\u7684\u884c\u60c5\u8bb0\u5f55 \u5982\u679c`start`\u5230`end`\u533a\u95f4\u67d0\u652f\u80a1\u7968\u505c\u724c\uff0c\u5219\u4f1a\u8fd4\u56de\u7a7a\u6570\u7ec4\u3002 Args: code: \u8bc1\u5238\u4ee3\u7801 frame_type: \u5e27\u7c7b\u578b start: \u8d77\u59cb\u65f6\u95f4 end: \u7ed3\u675f\u65f6\u95f4\uff0c\u5982\u679c\u672a\u6307\u660e\uff0c\u5219\u53d6\u5f53\u524d\u65f6\u95f4 Returns: \u8fd4\u56dedtype\u4e3a`coretypes.bars_dtype`\u7684\u4e00\u7ef4numpy\u6570\u7ec4\u3002 \"\"\" end = end or datetime . datetime . now () keep_cols = [ \"_time\" ] + list ( bars_cols [ 1 :]) measurement = cls . _measurement_name ( frame_type ) flux = ( Flux () . bucket ( cfg . influxdb . bucket_name ) . range ( start , end ) . measurement ( measurement ) . fields ( keep_cols ) . tags ({ \"code\" : code }) ) serializer = DataframeDeserializer ( encoding = \"utf-8\" , names = [ \"_\" , \"table\" , \"result\" , \"frame\" , \"code\" , \"amount\" , \"close\" , \"factor\" , \"high\" , \"low\" , \"open\" , \"volume\" , ], engine = \"c\" , skiprows = 0 , header = 0 , usecols = bars_cols , parse_dates = [ \"frame\" ], ) client = get_influx_client () result = await client . query ( flux , serializer ) return result . to_records ( index = False ) . astype ( bars_dtype ) @classmethod async def _get_persisted_bars_n ( cls , code : str , frame_type : FrameType , n : int , end : Frame = None ) -> BarsArray : \"\"\"\u4ece\u6301\u4e45\u5316\u6570\u636e\u5e93\u4e2d\u83b7\u53d6\u622a\u6b62\u5230`end`\u7684`n`\u6761\u884c\u60c5\u8bb0\u5f55 \u5982\u679c`end`\u672a\u6307\u5b9a\uff0c\u5219\u53d6\u5f53\u524d\u65f6\u95f4\u3002 \u57fa\u4e8einfluxdb\u67e5\u8be2\u7684\u7279\u6027\uff0c\u5728\u67e5\u8be2\u524d\uff0c\u5fc5\u987b\u5148\u6839\u636e`end`\u548c`n`\u8ba1\u7b97\u51fa\u8d77\u59cb\u65f6\u95f4\uff0c\u4f46\u5982\u679c\u5728\u6b64\u671f\u95f4\u67d0\u4e9b\u80a1\u7968\u6709\u505c\u724c\uff0c\u5219\u65e0\u6cd5\u8fd4\u56de\u7684\u6570\u636e\u5c06\u5c0f\u4e8e`n`\u3002\u800c\u5982\u679c\u8d77\u59cb\u65f6\u95f4\u8bbe\u7f6e\u5f97\u8db3\u591f\u65e9\uff0c\u867d\u7136\u80fd\u6ee1\u8db3\u8fd4\u56de\u6570\u636e\u6761\u6570\u7684\u8981\u6c42\uff0c\u4f46\u4f1a\u5e26\u6765\u6027\u80fd\u4e0a\u7684\u635f\u5931\u3002\u56e0\u6b64\uff0c\u6211\u4eec\u5728\u8ba1\u7b97\u8d77\u59cb\u65f6\u95f4\u65f6\uff0c\u4e0d\u662f\u4f7f\u7528`n`\u6765\u8ba1\u7b97\uff0c\u800c\u662f\u4f7f\u7528\u4e86`min(n * 2, n + 20)`\u6765\u8ba1\u7b97\u8d77\u59cb\u65f6\u95f4\uff0c\u8fd9\u6837\u591a\u6570\u60c5\u51b5\u4e0b\uff0c\u80fd\u591f\u4fdd\u8bc1\u8fd4\u56de\u6570\u636e\u7684\u6761\u6570\u4e3a`n`\u6761\u3002 \u8fd4\u56de\u7684\u6570\u636e\u6309`frame`\u8fdb\u884c\u5347\u5e8f\u6392\u5217\u3002 Args: code: \u8bc1\u5238\u4ee3\u7801 frame_type: \u5e27\u7c7b\u578b n: \u8fd4\u56de\u7ed3\u679c\u6570\u91cf end: \u7ed3\u675f\u65f6\u95f4\uff0c\u5982\u679c\u672a\u6307\u660e\uff0c\u5219\u53d6\u5f53\u524d\u65f6\u95f4 Returns: \u8fd4\u56dedtype\u4e3a`bars_dtype`\u7684numpy\u6570\u7ec4 \"\"\" # check is needed since tags accept List as well assert isinstance ( code , str ), \"`code` must be a string\" end = end or datetime . datetime . now () closed_end = tf . floor ( end , frame_type ) start = tf . 
shift ( closed_end , - min ( 2 * n , n + 20 ), frame_type ) keep_cols = [ \"_time\" ] + list ( bars_cols [ 1 :]) measurement = cls . _measurement_name ( frame_type ) flux = ( Flux () . bucket ( cfg . influxdb . bucket_name ) . range ( start , end ) . measurement ( measurement ) . fields ( keep_cols ) . tags ({ \"code\" : code }) . latest ( n ) ) serializer = DataframeDeserializer ( encoding = \"utf-8\" , names = [ \"_\" , \"table\" , \"result\" , \"frame\" , \"code\" , \"amount\" , \"close\" , \"factor\" , \"high\" , \"low\" , \"open\" , \"volume\" , ], engine = \"c\" , skiprows = 0 , header = 0 , usecols = bars_cols , parse_dates = [ \"frame\" ], ) client = get_influx_client () result = await client . query ( flux , serializer ) return result . to_records ( index = False ) . astype ( bars_dtype ) @classmethod async def _batch_get_persisted_bars_n ( cls , codes : List [ str ], frame_type : FrameType , n : int , end : Frame = None ) -> pd . DataFrame : \"\"\"\u4ece\u6301\u4e45\u5316\u5b58\u50a8\u4e2d\u83b7\u53d6`codes`\u6307\u5b9a\u7684\u4e00\u6279\u80a1\u7968\u622a\u6b62`end`\u65f6\u7684`n`\u6761\u8bb0\u5f55\u3002 \u8fd4\u56de\u7684\u6570\u636e\u6309`frame`\u8fdb\u884c\u5347\u5e8f\u6392\u5217\u3002\u5982\u679c\u4e0d\u5b58\u5728\u6ee1\u8db3\u6307\u5b9a\u6761\u4ef6\u7684\u67e5\u8be2\u7ed3\u679c\uff0c\u5c06\u8fd4\u56de\u7a7a\u7684DataFrame\u3002 \u57fa\u4e8einfluxdb\u67e5\u8be2\u7684\u7279\u6027\uff0c\u5728\u67e5\u8be2\u524d\uff0c\u5fc5\u987b\u5148\u6839\u636e`end`\u548c`n`\u8ba1\u7b97\u51fa\u8d77\u59cb\u65f6\u95f4\uff0c\u4f46\u5982\u679c\u5728\u6b64\u671f\u95f4\u67d0\u4e9b\u80a1\u7968\u6709\u505c\u724c\uff0c\u5219\u65e0\u6cd5\u8fd4\u56de\u7684\u6570\u636e\u5c06\u5c0f\u4e8e`n`\u3002\u5982\u679c\u8d77\u59cb\u65f6\u95f4\u8bbe\u7f6e\u7684\u8db3\u591f\u65e9\uff0c\u867d\u7136\u80fd\u6ee1\u8db3\u8fd4\u56de\u6570\u636e\u6761\u6570\u7684\u8981\u6c42\uff0c\u4f46\u4f1a\u5e26\u6765\u6027\u80fd\u4e0a\u7684\u635f\u5931\u3002\u56e0\u6b64\uff0c\u6211\u4eec\u5728\u8ba1\u7b97\u8d77\u59cb\u65f6\u95f4\u65f6\uff0c\u4e0d\u662f\u4f7f\u7528`n`\u6765\u8ba1\u7b97\uff0c\u800c\u662f\u4f7f\u7528\u4e86`min(n * 2, n + 20)`\u6765\u8ba1\u7b97\u8d77\u59cb\u65f6\u95f4\uff0c\u8fd9\u6837\u591a\u6570\u60c5\u51b5\u4e0b\uff0c\u80fd\u591f\u4fdd\u8bc1\u8fd4\u56de\u6570\u636e\u7684\u6761\u6570\u4e3a`n`\u6761\u3002 Args: codes: \u8bc1\u5238\u4ee3\u7801\u5217\u8868\u3002 frame_type: \u5e27\u7c7b\u578b n: \u8fd4\u56de\u7ed3\u679c\u6570\u91cf end: \u7ed3\u675f\u65f6\u95f4\uff0c\u5982\u679c\u672a\u6307\u5b9a\uff0c\u5219\u4f7f\u7528\u5f53\u524d\u65f6\u95f4 Returns: DataFrame, columns\u4e3a`code`, `frame`, `open`, `high`, `low`, `close`, `volume`, `amount`, `factor` \"\"\" max_query_size = min ( cfg . influxdb . max_query_size , INFLUXDB_MAX_QUERY_SIZE ) if len ( codes ) * min ( n + 20 , 2 * n ) > max_query_size : raise BadParameterError ( f \"codes\u7684\u6570\u91cf\u548cn\u7684\u4e58\u79ef\u8d85\u8fc7\u4e86influxdb\u7684\u6700\u5927\u67e5\u8be2\u6570\u91cf\u9650\u5236 { max_query_size } \" ) end = end or datetime . datetime . now () close_end = tf . floor ( end , frame_type ) begin = tf . 
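The start-time heuristic described in the docstrings above (query `min(2n, n + 20)` frames back instead of exactly `n`, to absorb suspension gaps without over-fetching) can be sanity-checked with a few plain numbers:

```python
# A few plain numbers for the over-fetch heuristic used when querying influxdb:
# the query window is min(2 * n, n + 20) frames, not n.
for n in (5, 20, 100):
    window = min(2 * n, n + 20)
    print(f"n={n:<4d} -> query window of {window} frames")
# n=5   -> 10 frames (doubling dominates for small n)
# n=20  -> 40 frames
# n=100 -> 120 frames (the +20 cap dominates for large n)
```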
shift ( close_end , - 1 * min ( n + 20 , n * 2 ), frame_type ) # influxdb\u7684\u67e5\u8be2\u7ed3\u679c\u683c\u5f0f\u7c7b\u4f3c\u4e8eCSV\uff0c\u5176\u5217\u987a\u5e8f\u4e3a_, result_alias, table_seq, _time, tags, fields,\u5176\u4e2dtags\u548cfields\u90fd\u662f\u5347\u5e8f\u6392\u5217 keep_cols = [ \"code\" ] + list ( bars_cols ) names = [ \"_\" , \"result\" , \"table\" , \"frame\" , \"code\" ] # influxdb will return fields in the order of name ascending parallel names . extend ( sorted ( bars_cols [ 1 :])) measurement = cls . _measurement_name ( frame_type ) flux = ( Flux () . bucket ( cfg . influxdb . bucket_name ) . range ( begin , end ) . measurement ( measurement ) . fields ( keep_cols ) . latest ( n ) ) if codes is not None : assert isinstance ( codes , list ), \"`codes` must be a list or None\" flux . tags ({ \"code\" : codes }) deserializer = DataframeDeserializer ( names = names , usecols = keep_cols , encoding = \"utf-8\" , time_col = \"frame\" , engine = \"c\" , ) client = get_influx_client () return await client . query ( flux , deserializer ) @classmethod async def _batch_get_persisted_bars_in_range ( cls , codes : List [ str ], frame_type : FrameType , begin : Frame , end : Frame = None ) -> pd . DataFrame : \"\"\"\u4ece\u6301\u4e45\u5316\u5b58\u50a8\u4e2d\u83b7\u53d6`codes`\u6307\u5b9a\u7684\u4e00\u6279\u80a1\u7968\u5728`begin`\u548c`end`\u4e4b\u95f4\u7684\u8bb0\u5f55\u3002 \u8fd4\u56de\u7684\u6570\u636e\u5c06\u6309`frame`\u8fdb\u884c\u5347\u5e8f\u6392\u5217\u3002 \u6ce8\u610f\uff0c\u8fd4\u56de\u7684\u6570\u636e\u6709\u53ef\u80fd\u4e0d\u662f\u7b49\u957f\u7684\uff0c\u56e0\u4e3a\u6709\u7684\u80a1\u7968\u53ef\u80fd\u505c\u724c\u3002 Args: codes: \u8bc1\u5238\u4ee3\u7801\u5217\u8868\u3002 frame_type: \u5e27\u7c7b\u578b begin: \u5f00\u59cb\u65f6\u95f4 end: \u7ed3\u675f\u65f6\u95f4 Returns: DataFrame, columns\u4e3a`code`, `frame`, `open`, `high`, `low`, `close`, `volume`, `amount`, `factor` \"\"\" end = end or datetime . datetime . now () n = tf . count_frames ( begin , end , frame_type ) max_query_size = min ( cfg . influxdb . max_query_size , INFLUXDB_MAX_QUERY_SIZE ) if len ( codes ) * n > max_query_size : raise BadParameterError ( f \"asked records is { len ( codes ) * n } , which is too large than { max_query_size } \" ) # influxdb\u7684\u67e5\u8be2\u7ed3\u679c\u683c\u5f0f\u7c7b\u4f3c\u4e8eCSV\uff0c\u5176\u5217\u987a\u5e8f\u4e3a_, result_alias, table_seq, _time, tags, fields,\u5176\u4e2dtags\u548cfields\u90fd\u662f\u5347\u5e8f\u6392\u5217 keep_cols = [ \"code\" ] + list ( bars_cols ) names = [ \"_\" , \"result\" , \"table\" , \"frame\" , \"code\" ] # influxdb will return fields in the order of name ascending parallel names . extend ( sorted ( bars_cols [ 1 :])) measurement = cls . _measurement_name ( frame_type ) flux = ( Flux () . bucket ( cfg . influxdb . bucket_name ) . range ( begin , end ) . measurement ( measurement ) . fields ( keep_cols ) ) flux . tags ({ \"code\" : codes }) deserializer = DataframeDeserializer ( names = names , usecols = keep_cols , encoding = \"utf-8\" , time_col = \"frame\" , engine = \"c\" , ) client = get_influx_client () df = await client . 
query ( flux , deserializer ) return df @classmethod async def batch_cache_bars ( cls , frame_type : FrameType , bars : Dict [ str , BarsArray ]): \"\"\"\u7f13\u5b58\u5df2\u6536\u76d8\u7684\u5206\u949f\u7ebf\u548c\u65e5\u7ebf \u5f53\u7f13\u5b58\u65e5\u7ebf\u65f6\uff0c\u4ec5\u9650\u4e8e\u5f53\u65e5\u6536\u76d8\u540e\u7684\u7b2c\u4e00\u6b21\u540c\u6b65\u65f6\u8c03\u7528\u3002 Args: frame_type: \u5e27\u7c7b\u578b bars: \u884c\u60c5\u6570\u636e\uff0c\u5176key\u4e3a\u80a1\u7968\u4ee3\u7801\uff0c\u5176value\u4e3adtype\u4e3a`bars_dtype`\u7684\u4e00\u7ef4numpy\u6570\u7ec4\u3002 Raises: RedisError: \u5982\u679c\u5728\u6267\u884c\u8fc7\u7a0b\u4e2d\u53d1\u751f\u9519\u8bef\uff0c\u5219\u629b\u51fa\u4ee5\u6b64\u5f02\u5e38\u4e3a\u57fa\u7c7b\u7684\u5404\u79cd\u5f02\u5e38\uff0c\u5177\u4f53\u53c2\u8003aioredis\u76f8\u5173\u6587\u6863\u3002 \"\"\" if frame_type == FrameType . DAY : await cls . batch_cache_unclosed_bars ( frame_type , bars ) return pl = cache . security . pipeline () for code , bars in bars . items (): key = f \"bars: { frame_type . value } : { code } \" for bar in bars : frame = tf . time2int ( bar [ \"frame\" ] . item ()) val = [ * bar ] val [ 0 ] = frame pl . hset ( key , frame , \",\" . join ( map ( str , val ))) await pl . execute () @classmethod async def batch_cache_unclosed_bars ( cls , frame_type : FrameType , bars : Dict [ str , BarsArray ] ): # pragma: no cover \"\"\"\u7f13\u5b58\u672a\u6536\u76d8\u76845\u300115\u300130\u300160\u5206\u949f\u7ebf\u53ca\u65e5\u7ebf\u3001\u5468\u7ebf\u3001\u6708\u7ebf Args: frame_type: \u5e27\u7c7b\u578b bars: \u884c\u60c5\u6570\u636e\uff0c\u5176key\u4e3a\u80a1\u7968\u4ee3\u7801\uff0c\u5176value\u4e3adtype\u4e3a`bars_dtype`\u7684\u4e00\u7ef4numpy\u6570\u7ec4\u3002bars\u4e0d\u80fd\u4e3aNone\uff0c\u6216\u8005empty\u3002 Raise: RedisError: \u5982\u679c\u5728\u6267\u884c\u8fc7\u7a0b\u4e2d\u53d1\u751f\u9519\u8bef\uff0c\u5219\u629b\u51fa\u4ee5\u6b64\u5f02\u5e38\u4e3a\u57fa\u7c7b\u7684\u5404\u79cd\u5f02\u5e38\uff0c\u5177\u4f53\u53c2\u8003aioredis\u76f8\u5173\u6587\u6863\u3002 \"\"\" pl = cache . security . pipeline () key = f \"bars: { frame_type . value } :unclosed\" convert = tf . time2int if frame_type in tf . minute_level_frames else tf . date2int for code , bar in bars . items (): val = [ * bar [ 0 ]] val [ 0 ] = convert ( bar [ \"frame\" ][ 0 ] . item ()) # \u65f6\u95f4\u8f6c\u6362 pl . hset ( key , code , \",\" . join ( map ( str , val ))) await pl . execute () @classmethod async def reset_cache ( cls ): \"\"\"\u6e05\u9664\u7f13\u5b58\u7684\u884c\u60c5\u6570\u636e\"\"\" try : for ft in itertools . chain ( tf . minute_level_frames , tf . day_level_frames ): keys = await cache . security . keys ( f \"bars: { ft . value } :*\" ) if keys : await cache . security . delete ( * keys ) finally : cls . _is_cache_empty = True @classmethod def _deserialize_cached_bars ( cls , raw : List [ str ], ft : FrameType ) -> BarsArray : \"\"\"\u4eceredis\u4e2d\u53cd\u5e8f\u5217\u5316\u7f13\u5b58\u7684\u6570\u636e \u5982\u679c`raw`\u7a7a\u6570\u7ec4\u6216\u8005\u5143\u7d20\u4e3a`None`\uff0c\u5219\u8fd4\u56de\u7a7a\u6570\u7ec4\u3002 Args: raw: redis\u4e2d\u7684\u7f13\u5b58\u6570\u636e ft: \u5e27\u7c7b\u578b sort: \u662f\u5426\u9700\u8981\u91cd\u65b0\u6392\u5e8f\uff0c\u7f3a\u7701\u4e3aFalse Returns: BarsArray: \u884c\u60c5\u6570\u636e \"\"\" fix_date = False if ft in tf . minute_level_frames : convert = tf . int2time else : convert = tf . 
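The cache layout used by `batch_cache_bars` and `_deserialize_cached_bars` above stores each bar as a comma-joined string under the hash key `bars:{frame_type.value}:{code}`, with the frame converted to an integer. The sketch below only replays the string round-trip without redis; the raw record and its exact integer format are hypothetical.

```python
# Minimal sketch of the cached record format: "frame_int,open,high,low,close,volume,amount,factor".
# The raw string below is made up; no redis connection is involved.
raw_rec = "202301041500,10.0,10.5,9.8,10.2,1000000.0,10200000.0,1.0"

f, o, h, l, c, v, m, fac = raw_rec.split(",")
print(int(f), float(o), float(c), float(fac))

# For day-level frames the deserializer keeps only the first 8 digits of the
# frame field (fix_date) before converting it back to a date.
print(f[:8])
```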
int2date fix_date = True recs = [] # it's possible to treat raw as csv and use pandas to parse, however, the performance is 10 times worse than this method for raw_rec in raw : if raw_rec is None : continue f , o , h , l , c , v , m , fac = raw_rec . split ( \",\" ) if fix_date : f = f [: 8 ] recs . append ( ( convert ( f ), float ( o ), float ( h ), float ( l ), float ( c ), float ( v ), float ( m ), float ( fac ), ) ) return np . array ( recs , dtype = bars_dtype ) @classmethod async def _batch_get_cached_bars_n ( cls , frame_type : FrameType , n : int , end : Frame = None , codes : List [ str ] = None ) -> BarsPanel : \"\"\"\u6279\u91cf\u83b7\u53d6\u5728cache\u4e2d\u622a\u6b62`end`\u7684`n`\u4e2abars\u3002 \u5982\u679c`end`\u4e0d\u5728`frame_type`\u6240\u5c5e\u7684\u8fb9\u754c\u70b9\u4e0a\uff0c\u90a3\u4e48\uff0c\u5982\u679c`end`\u5927\u4e8e\u7b49\u4e8e\u5f53\u524d\u7f13\u5b58\u672a\u6536\u76d8\u6570\u636e\u65f6\u95f4\uff0c\u5219\u5c06\u5305\u542b\u672a\u6536\u76d8\u6570\u636e\uff1b\u5426\u5219\uff0c\u8fd4\u56de\u7684\u8bb0\u5f55\u5c06\u622a\u6b62\u5230`tf.floor(end, frame_type)`\u3002 Args: frame_type: \u65f6\u95f4\u5e27\u7c7b\u578b n: \u8fd4\u56de\u8bb0\u5f55\u6761\u6570 codes: \u8bc1\u5238\u4ee3\u7801\u5217\u8868 end: \u622a\u6b62\u65f6\u95f4, \u5982\u679c\u4e3aNone Returns: BarsPanel: \u884c\u60c5\u6570\u636e \"\"\" # \u8c03\u7528\u8005\u81ea\u5df1\u4fdd\u8bc1end\u5728\u7f13\u5b58\u4e2d cols = list ( bars_dtype_with_code . names ) if frame_type in tf . day_level_frames : key = f \"bars: { frame_type . value } :unclosed\" if codes is None : recs = await cache . security . hgetall ( key ) codes = list ( recs . keys ()) recs = recs . values () else : recs = await cache . security . hmget ( key , * codes ) barss = cls . _deserialize_cached_bars ( recs , frame_type ) if barss . size > 0 : if len ( barss ) != len ( codes ): # issue 39, \u5982\u679c\u67d0\u652f\u7968\u5f53\u5929\u505c\u724c\uff0c\u5219\u7f13\u5b58\u4e2d\u5c06\u4e0d\u4f1a\u6709\u5b83\u7684\u8bb0\u5f55\uff0c\u6b64\u65f6\u9700\u8981\u79fb\u9664\u5176\u4ee3\u7801 codes = [ codes [ i ] for i , item in enumerate ( recs ) if item is not None ] barss = numpy_append_fields ( barss , \"code\" , codes , [( \"code\" , \"O\" )]) return barss [ cols ] . astype ( bars_dtype_with_code ) else : return np . array ([], dtype = bars_dtype_with_code ) else : end = end or datetime . datetime . now () close_end = tf . floor ( end , frame_type ) all_bars = [] if codes is None : keys = await cache . security . keys ( f \"bars: { frame_type . value } :*[^unclosed]\" ) codes = [ key . split ( \":\" )[ - 1 ] for key in keys ] else : keys = [ f \"bars: { frame_type . value } : { code } \" for code in codes ] if frame_type != FrameType . MIN1 : unclosed = await cache . security . hgetall ( f \"bars: { frame_type . value } :unclosed\" ) else : unclosed = {} pl = cache . security . pipeline () frames = tf . get_frames_by_count ( close_end , n , frame_type ) for key in keys : pl . hmget ( key , * frames ) all_closed = await pl . execute () for code , raw in zip ( codes , all_closed ): raw . append ( unclosed . get ( code )) barss = cls . _deserialize_cached_bars ( raw , frame_type ) barss = numpy_append_fields ( barss , \"code\" , [ code ] * len ( barss ), [( \"code\" , \"O\" )] ) barss = barss [ cols ] . astype ( bars_dtype_with_code ) all_bars . append ( barss [ barss [ \"frame\" ] <= end ][ - n :]) try : return np . concatenate ( all_bars ) except ValueError as e : logger . exception ( e ) return np . 
array ([], dtype = bars_dtype_with_code ) @classmethod async def _get_cached_bars_n ( cls , code : str , n : int , frame_type : FrameType , end : Frame = None ) -> BarsArray : \"\"\"\u4ece\u7f13\u5b58\u4e2d\u83b7\u53d6\u6307\u5b9a\u4ee3\u7801\u7684\u884c\u60c5\u6570\u636e \u5b58\u53d6\u903b\u8f91\u662f\uff0c\u4ece`end`\u6307\u5b9a\u7684\u65f6\u95f4\u5411\u524d\u53d6`n`\u6761\u8bb0\u5f55\u3002`end`\u4e0d\u5e94\u8be5\u5927\u4e8e\u5f53\u524d\u7cfb\u7edf\u65f6\u95f4\uff0c\u5e76\u4e14\u6839\u636e`end`\u548c`n`\u8ba1\u7b97\u51fa\u6765\u7684\u8d77\u59cb\u65f6\u95f4\u5e94\u8be5\u5728\u7f13\u5b58\u4e2d\u5b58\u5728\u3002\u5426\u5219\uff0c\u4e24\u79cd\u60c5\u51b5\u4e0b\uff0c\u8fd4\u56de\u8bb0\u5f55\u6570\u90fd\u5c06\u5c0f\u4e8e`n`\u3002 \u5982\u679c`end`\u4e0d\u5904\u4e8e`frame_type`\u6240\u5c5e\u7684\u8fb9\u754c\u7ed3\u675f\u4f4d\u7f6e\uff0c\u4e14\u5c0f\u4e8e\u5f53\u524d\u5df2\u7f13\u5b58\u7684\u672a\u6536\u76d8bar\u65f6\u95f4\uff0c\u5219\u4f1a\u8fd4\u56de\u524d\u4e00\u4e2a\u5df2\u6536\u76d8\u7684\u6570\u636e\uff0c\u5426\u5219\uff0c\u8fd4\u56de\u7684\u8bb0\u5f55\u4e2d\u8fd8\u5c06\u5305\u542b\u672a\u6536\u76d8\u7684\u6570\u636e\u3002 args: code: \u8bc1\u5238\u4ee3\u7801\uff0c\u6bd4\u5982000001.XSHE n: \u8fd4\u56de\u8bb0\u5f55\u6761\u6570 frame_type: \u5e27\u7c7b\u578b end: \u7ed3\u675f\u5e27\uff0c\u5982\u679c\u4e3aNone\uff0c\u5219\u53d6\u5f53\u524d\u65f6\u95f4 returns: \u5143\u7d20\u7c7b\u578b\u4e3a`coretypes.bars_dtype`\u7684\u4e00\u7ef4numpy\u6570\u7ec4\u3002\u5982\u679c\u6ca1\u6709\u6570\u636e\uff0c\u5219\u8fd4\u56de\u7a7andarray\u3002 \"\"\" # 50 times faster than arrow.now().floor('second') end = end or datetime . datetime . now () . replace ( second = 0 , microsecond = 0 ) if frame_type in tf . minute_level_frames : cache_start = tf . first_min_frame ( end . date (), frame_type ) closed = tf . floor ( end , frame_type ) frames = ( tf . get_frames ( cache_start , closed , frame_type ))[ - n :] if len ( frames ) == 0 : recs = np . empty ( shape = ( 0 ,), dtype = bars_dtype ) else : key = f \"bars: { frame_type . value } : { code } \" recs = await cache . security . hmget ( key , * frames ) recs = cls . _deserialize_cached_bars ( recs , frame_type ) if closed < end : # for unclosed key = f \"bars: { frame_type . value } :unclosed\" unclosed = await cache . security . hget ( key , code ) unclosed = cls . _deserialize_cached_bars ([ unclosed ], frame_type ) if len ( unclosed ) == 0 : return recs [ - n :] if end < unclosed [ 0 ][ \"frame\" ] . item (): # \u5982\u679cunclosed\u4e3a9:36, \u8c03\u7528\u8005\u8981\u6c42\u53d69:29\u76845m\u6570\u636e\uff0c\u5219\u53d6\u5230\u7684unclosed\u4e0d\u5408\u8981\u6c42\uff0c\u629b\u5f03\u3002\u4f3c\u4e4e\u6ca1\u6709\u66f4\u597d\u7684\u65b9\u6cd5\u68c0\u6d4bend\u4e0eunclosed\u7684\u5173\u7cfb return recs [ - n :] else : bars = np . concatenate (( recs , unclosed )) return bars [ - n :] else : return recs [ - n :] else : # \u65e5\u7ebf\u53ca\u4ee5\u4e0a\u7ea7\u522b\uff0c\u4ec5\u5728\u7f13\u5b58\u4e2d\u5b58\u5728\u672a\u6536\u76d8\u6570\u636e key = f \"bars: { frame_type . value } :unclosed\" rec = await cache . security . hget ( key , code ) return cls . 
_deserialize_cached_bars ([ rec ], frame_type ) @classmethod async def cache_bars ( cls , code : str , frame_type : FrameType , bars : BarsArray ): \"\"\"\u5c06\u5f53\u671f\u5df2\u6536\u76d8\u7684\u884c\u60c5\u6570\u636e\u7f13\u5b58 Note: \u5f53\u524d\u53ea\u7f13\u5b581\u5206\u949f\u6570\u636e\u3002\u5176\u5b83\u5206\u949f\u6570\u636e\uff0c\u90fd\u5728\u8c03\u7528\u65f6\uff0c\u901a\u8fc7resample\u4e34\u65f6\u5408\u6210\u3002 \u884c\u60c5\u6570\u636e\u7f13\u5b58\u5728\u4ee5`bars:{frame_type.value}:{code}`\u4e3akey, {frame}\u4e3afield\u7684hashmap\u4e2d\u3002 Args: code: the full qualified code of a security or index frame_type: frame type of the bars bars: the bars to cache, which is a numpy array of dtype `coretypes.bars_dtype` Raises: RedisError: if redis operation failed, see documentation of aioredis \"\"\" # \u8f6c\u6362\u65f6\u95f4\u4e3aint convert = tf . time2int if frame_type in tf . minute_level_frames else tf . date2int key = f \"bars: { frame_type . value } : { code } \" pl = cache . security . pipeline () for bar in bars : val = [ * bar ] val [ 0 ] = convert ( bar [ \"frame\" ] . item ()) pl . hset ( key , val [ 0 ], \",\" . join ( map ( str , val ))) await pl . execute () @classmethod async def cache_unclosed_bars ( cls , code : str , frame_type : FrameType , bars : BarsArray ): # pragma: no cover \"\"\"\u5c06\u672a\u7ed3\u675f\u7684\u884c\u60c5\u6570\u636e\u7f13\u5b58 \u672a\u7ed3\u675f\u7684\u884c\u60c5\u6570\u636e\u7f13\u5b58\u5728\u4ee5`bars:{frame_type.value}:unclosed`\u4e3akey, {code}\u4e3afield\u7684hashmap\u4e2d\u3002 \u5c3d\u7ba1`bars`\u88ab\u58f0\u660e\u4e3aBarsArray\uff0c\u4f46\u5b9e\u9645\u4e0a\u5e94\u8be5\u53ea\u5305\u542b\u4e00\u4e2a\u5143\u7d20\u3002 Args: code: the full qualified code of a security or index frame_type: frame type of the bars bars: the bars to cache, which is a numpy array of dtype `coretypes.bars_dtype` Raises: RedisError: if redis operation failed, see documentation of aioredis \"\"\" converter = tf . time2int if frame_type in tf . minute_level_frames else tf . date2int assert len ( bars ) == 1 , \"unclosed bars should only have one record\" key = f \"bars: { frame_type . value } :unclosed\" bar = bars [ 0 ] val = [ * bar ] val [ 0 ] = converter ( bar [ \"frame\" ] . item ()) await cache . security . hset ( key , code , \",\" . join ( map ( str , val ))) @classmethod async def persist_bars ( cls , frame_type : FrameType , bars : Union [ Dict [ str , BarsArray ], BarsArray , pd . DataFrame ], ): \"\"\"\u5c06\u884c\u60c5\u6570\u636e\u6301\u4e45\u5316 \u5982\u679c`bars`\u7c7b\u578b\u4e3aDict,\u5219key\u4e3a`code`\uff0cvalue\u4e3a`bars`\u3002\u5982\u679c\u5176\u7c7b\u578b\u4e3aBarsArray\u6216\u8005pd.DataFrame\uff0c\u5219`bars`\u5404\u5217\u5b57\u6bb5\u5e94\u8be5\u4e3a`coretypes.bars_dtype` + (\"code\", \"O\")\u6784\u6210\u3002 Args: frame_type: the frame type of the bars bars: the bars to be persisted Raises: InfluxDBWriteError: if influxdb write failed \"\"\" client = get_influx_client () measurement = cls . _measurement_name ( frame_type ) logger . info ( \"persisting bars to influxdb: %s , %d secs\" , measurement , len ( bars )) if isinstance ( bars , dict ): for code , value in bars . items (): await client . save ( value , measurement , global_tags = { \"code\" : code }, time_key = \"frame\" ) else : await client . 
save ( bars , measurement , tag_keys = [ \"code\" ], time_key = \"frame\" ) @classmethod def resample ( cls , bars : BarsArray , from_frame : FrameType , to_frame : FrameType ) -> BarsArray : \"\"\"\u5c06\u539f\u6765\u4e3a`from_frame`\u7684\u884c\u60c5\u6570\u636e\u8f6c\u6362\u4e3a`to_frame`\u7684\u884c\u60c5\u6570\u636e \u5982\u679c`to_frame`\u4e3a\u65e5\u7ebf\u6216\u8005\u5206\u949f\u7ea7\u522b\u7ebf\uff0c\u5219`from_frame`\u5fc5\u987b\u4e3a\u5206\u949f\u7ebf\uff1b\u5982\u679c`to_frame`\u4e3a\u5468\u4ee5\u4e0a\u7ea7\u522b\u7ebf\uff0c\u5219`from_frame`\u5fc5\u987b\u4e3a\u65e5\u7ebf\u3002\u5176\u5b83\u7ea7\u522b\u4e4b\u95f4\u7684\u8f6c\u6362\u4e0d\u652f\u6301\u3002 \u5982\u679c`from_frame`\u4e3a1\u5206\u949f\u7ebf\uff0c\u5219\u5fc5\u987b\u4ece9\uff1a31\u8d77\u3002 Args: bars (BarsArray): \u884c\u60c5\u6570\u636e from_frame (FrameType): \u8f6c\u6362\u524d\u7684FrameType to_frame (FrameType): \u8f6c\u6362\u540e\u7684FrameType Returns: BarsArray: \u8f6c\u6362\u540e\u7684\u884c\u60c5\u6570\u636e \"\"\" if from_frame == FrameType . MIN1 : return cls . _resample_from_min1 ( bars , to_frame ) elif from_frame == FrameType . DAY : # pragma: no cover return cls . _resample_from_day ( bars , to_frame ) else : # pragma: no cover raise TypeError ( f \"unsupported from_frame: { from_frame } \" ) @classmethod def _measurement_name ( cls , frame_type ): return f \"stock_bars_ { frame_type . value } \" @classmethod def _resample_from_min1 ( cls , bars : BarsArray , to_frame : FrameType ) -> BarsArray : \"\"\"\u5c06`bars`\u4ece1\u5206\u949f\u7ebf\u8f6c\u6362\u4e3a`to_frame`\u7684\u884c\u60c5\u6570\u636e \u91cd\u91c7\u6837\u540e\u7684\u6570\u636e\u53ea\u5305\u542bframe, open, high, low, close, volume, amount, factor\uff0c\u65e0\u8bba\u4f20\u5165\u6570\u636e\u662f\u5426\u8fd8\u6709\u522b\u7684\u5b57\u6bb5\uff0c\u5b83\u4eec\u90fd\u5c06\u88ab\u4e22\u5f03\u3002 resampling 240\u6839\u5206\u949f\u7ebf\u52305\u5206\u949f\u5927\u7ea6\u9700\u8981100\u5fae\u79d2\u3002 TODO\uff1a \u5982\u679c`bars`\u4e2d\u5305\u542bnan\u600e\u4e48\u5904\u7406\uff1f \"\"\" if bars [ 0 ][ \"frame\" ] . item () . minute != 31 : raise ValueError ( \"resampling from 1min must start from 9:31\" ) if to_frame not in ( FrameType . MIN5 , FrameType . MIN15 , FrameType . MIN30 , FrameType . MIN60 , FrameType . DAY , ): raise ValueError ( f \"unsupported to_frame: { to_frame } \" ) bins_len = { FrameType . MIN5 : 5 , FrameType . MIN15 : 15 , FrameType . MIN30 : 30 , FrameType . MIN60 : 60 , FrameType . DAY : 240 , }[ to_frame ] bins = len ( bars ) // bins_len npart1 = bins * bins_len part1 = bars [: npart1 ] . reshape (( - 1 , bins_len )) part2 = bars [ npart1 :] open_pos = np . arange ( bins ) * bins_len close_pos = np . arange ( 1 , bins + 1 ) * bins_len - 1 if len ( bars ) > bins_len * bins : close_pos = np . append ( close_pos , len ( bars ) - 1 ) resampled = np . empty (( bins + 1 ,), dtype = bars_dtype ) else : resampled = np . empty (( bins ,), dtype = bars_dtype ) resampled [: bins ][ \"open\" ] = bars [ open_pos ][ \"open\" ] resampled [: bins ][ \"high\" ] = np . max ( part1 [ \"high\" ], axis = 1 ) resampled [: bins ][ \"low\" ] = np . min ( part1 [ \"low\" ], axis = 1 ) resampled [: bins ][ \"volume\" ] = np . sum ( part1 [ \"volume\" ], axis = 1 ) resampled [: bins ][ \"amount\" ] = np . sum ( part1 [ \"amount\" ], axis = 1 ) if len ( part2 ): resampled [ - 1 ][ \"open\" ] = part2 [ \"open\" ][ 0 ] resampled [ - 1 ][ \"high\" ] = np . max ( part2 [ \"high\" ]) resampled [ - 1 ][ \"low\" ] = np . 
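Given the constraints stated above (1-minute input starting at 09:31, target frames of 5/15/30/60 minutes or one day), a hypothetical call to the resampler might look like this. The code is made up, the `FrameType` import path is an assumption, and an initialized omicron/Omega environment is assumed.

```python
# Hypothetical sketch: fetch one session of 1-minute bars and downsample to 5 minutes.
import asyncio
from coretypes import FrameType  # assumed import path
from omicron.models.stock import Stock

async def main():
    # a full session is 240 one-minute bars; resample requires the series to start at 09:31
    min1_bars = await Stock.get_bars("000001.XSHE", 240, FrameType.MIN1)
    min5_bars = Stock.resample(min1_bars, FrameType.MIN1, FrameType.MIN5)
    print(len(min1_bars), "->", len(min5_bars))

asyncio.run(main())
```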
min ( part2 [ \"low\" ]) resampled [ - 1 ][ \"volume\" ] = np . sum ( part2 [ \"volume\" ]) resampled [ - 1 ][ \"amount\" ] = np . sum ( part2 [ \"amount\" ]) cols = [ \"frame\" , \"close\" , \"factor\" ] resampled [ cols ] = bars [ close_pos ][ cols ] if to_frame == FrameType . DAY : resampled [ \"frame\" ] = bars [ - 1 ][ \"frame\" ] . item () . date () return resampled @classmethod def _resample_from_day ( cls , bars : BarsArray , to_frame : FrameType ) -> BarsArray : \"\"\"\u5c06`bars`\u4ece\u65e5\u7ebf\u8f6c\u6362\u6210`to_frame`\u7684\u884c\u60c5\u6570\u636e Args: bars (BarsArray): [description] to_frame (FrameType): [description] Returns: \u8f6c\u6362\u540e\u7684\u884c\u60c5\u6570\u636e \"\"\" rules = { \"frame\" : \"last\" , \"open\" : \"first\" , \"high\" : \"max\" , \"low\" : \"min\" , \"close\" : \"last\" , \"volume\" : \"sum\" , \"amount\" : \"sum\" , \"factor\" : \"last\" , } if to_frame == FrameType . WEEK : freq = \"W-Fri\" elif to_frame == FrameType . MONTH : freq = \"M\" elif to_frame == FrameType . QUARTER : freq = \"Q\" elif to_frame == FrameType . YEAR : freq = \"A\" else : raise ValueError ( f \"unsupported to_frame: { to_frame } \" ) df = pd . DataFrame ( bars ) df . index = pd . to_datetime ( bars [ \"frame\" ]) df = df . resample ( freq ) . agg ( rules ) bars = np . array ( df . to_records ( index = False ), dtype = bars_dtype ) # filter out data like (None, nan, ...) return bars [ np . isfinite ( bars [ \"close\" ])] @classmethod async def _get_price_limit_in_cache ( cls , code : str , begin : datetime . date , end : datetime . date ): date_str = await cache . _security_ . get ( TRADE_PRICE_LIMITS_DATE ) if date_str : date_in_cache = arrow . get ( date_str ) . date () if date_in_cache < begin or date_in_cache > end : return None else : return None dtype = [( \"frame\" , \"O\" ), ( \"high_limit\" , \"f4\" ), ( \"low_limit\" , \"f4\" )] hp = await cache . _security_ . hget ( TRADE_PRICE_LIMITS , f \" { code } .high_limit\" ) lp = await cache . _security_ . hget ( TRADE_PRICE_LIMITS , f \" { code } .low_limit\" ) if hp is None or lp is None : return None else : return np . array ([( date_in_cache , hp , lp )], dtype = dtype ) @classmethod async def get_trade_price_limits ( cls , code : str , begin : Frame , end : Frame ) -> BarsArray : \"\"\"\u4eceinfluxdb\u548ccache\u4e2d\u83b7\u53d6\u4e2a\u80a1\u5728[begin, end]\u4e4b\u95f4\u7684\u6da8\u8dcc\u505c\u4ef7\u3002 \u6da8\u8dcc\u505c\u4ef7\u53ea\u6709\u65e5\u7ebf\u6570\u636e\u624d\u6709\uff0c\u56e0\u6b64\uff0cFrameType\u56fa\u5b9a\u4e3aFrameType.DAY\uff0c \u5f53\u5929\u7684\u6570\u636e\u5b58\u653e\u4e8eredis\uff0c\u5982\u679c\u67e5\u8be2\u65e5\u671f\u5305\u542b\u5f53\u5929\uff08\u4ea4\u6613\u65e5\uff09\uff0c\u4ececache\u4e2d\u8bfb\u53d6\u5e76\u8ffd\u52a0\u5230\u7ed3\u679c\u4e2d Args: code : \u4e2a\u80a1\u4ee3\u7801 begin : \u5f00\u59cb\u65e5\u671f end : \u7ed3\u675f\u65e5\u671f Returns: dtype\u4e3a[('frame', 'O'), ('high_limit', 'f4'), ('low_limit', 'f4')]\u7684numpy\u6570\u7ec4 \"\"\" cols = [ \"_time\" , \"high_limit\" , \"low_limit\" ] dtype = [( \"frame\" , \"O\" ), ( \"high_limit\" , \"f4\" ), ( \"low_limit\" , \"f4\" )] if isinstance ( begin , datetime . datetime ): begin = begin . date () # \u5f3a\u5236\u8f6c\u6362\u4e3adate if isinstance ( end , datetime . datetime ): end = end . date () # \u5f3a\u5236\u8f6c\u6362\u4e3adate data_in_cache = await cls . _get_price_limit_in_cache ( code , begin , end ) client = get_influx_client () measurement = cls . _measurement_name ( FrameType . DAY ) flux = ( Flux () . 
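The day-level resampler above delegates to pandas with a fixed aggregation-rule mapping. The stand-alone sketch below replays those rules on a hand-made DataFrame (values are made up) to show how a daily series collapses into weekly bars.

```python
# Minimal stand-alone sketch of the day -> week aggregation rules used above,
# applied to a hypothetical DataFrame.
import numpy as np
import pandas as pd

rules = {
    "frame": "last", "open": "first", "high": "max", "low": "min",
    "close": "last", "volume": "sum", "amount": "sum", "factor": "last",
}
frames = pd.bdate_range("2023-01-02", periods=10)
df = pd.DataFrame(
    {
        "frame": frames,
        "open": np.linspace(10.0, 11.0, 10),
        "high": np.linspace(10.2, 11.2, 10),
        "low": np.linspace(9.8, 10.8, 10),
        "close": np.linspace(10.1, 11.1, 10),
        "volume": 1e6,
        "amount": 1e7,
        "factor": 1.0,
    },
    index=frames,
)
weekly = df.resample("W-FRI").agg(rules)
print(weekly[["open", "high", "low", "close"]])
```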
bucket ( client . _bucket ) . measurement ( measurement ) . range ( begin , end ) . tags ({ \"code\" : code }) . fields ( cols ) . sort ( \"_time\" ) ) ds = NumpyDeserializer ( dtype , use_cols = cols , converters = { \"_time\" : lambda x : ciso8601 . parse_datetime ( x ) . date ()}, # since we ask parse date in convertors, so we have to disable parse_date parse_date = None , ) result = await client . query ( flux , ds ) if data_in_cache : result = np . concatenate ([ result , data_in_cache ]) return result @classmethod async def reset_price_limits_cache ( cls , cache_only : bool , dt : datetime . date = None ): if cache_only is False : date_str = await cache . _security_ . get ( TRADE_PRICE_LIMITS_DATE ) if not date_str : return # skip clear action if date not found in cache date_in_cache = arrow . get ( date_str ) . date () if dt is None or date_in_cache != dt : # \u66f4\u65b0\u7684\u65f6\u95f4\u548ccache\u7684\u65f6\u95f4\u76f8\u540c\uff0c\u5219\u6e05\u9664cache return # skip clear action await cache . _security_ . delete ( TRADE_PRICE_LIMITS ) await cache . _security_ . delete ( TRADE_PRICE_LIMITS_DATE ) @classmethod async def save_trade_price_limits ( cls , price_limits : LimitPriceOnlyBarsArray , to_cache : bool ): \"\"\"\u4fdd\u5b58\u6da8\u8dcc\u505c\u4ef7 Args: price_limits: \u8981\u4fdd\u5b58\u7684\u6da8\u8dcc\u505c\u4ef7\u683c\u6570\u636e\u3002 to_cache: \u662f\u4fdd\u5b58\u5230\u7f13\u5b58\u4e2d\uff0c\u8fd8\u662f\u4fdd\u5b58\u5230\u6301\u4e45\u5316\u5b58\u50a8\u4e2d \"\"\" if len ( price_limits ) == 0 : return if to_cache : # \u6bcf\u4e2a\u4ea4\u6613\u65e5\u4e0a\u53489\u70b9\u66f4\u65b0\u4e24\u6b21 pl = cache . _security_ . pipeline () for row in price_limits : # .item convert np.float64 to python float pl . hset ( TRADE_PRICE_LIMITS , f \" { row [ 'code' ] } .high_limit\" , row [ \"high_limit\" ] . item (), ) pl . hset ( TRADE_PRICE_LIMITS , f \" { row [ 'code' ] } .low_limit\" , row [ \"low_limit\" ] . item (), ) dt = price_limits [ - 1 ][ \"frame\" ] pl . set ( TRADE_PRICE_LIMITS_DATE , dt . strftime ( \"%Y-%m- %d \" )) await pl . execute () else : # to influxdb\uff0c \u6bcf\u4e2a\u4ea4\u6613\u65e5\u7684\u7b2c\u4e8c\u5929\u65e9\u4e0a2\u70b9\u4fdd\u5b58 client = get_influx_client () await client . save ( price_limits , cls . _measurement_name ( FrameType . DAY ), tag_keys = \"code\" , time_key = \"frame\" , ) @classmethod async def trade_price_limit_flags ( cls , code : str , start : datetime . date , end : datetime . date ) -> Tuple [ List [ bool ]]: \"\"\"\u83b7\u53d6\u4e2a\u80a1\u5728[start, end]\u4e4b\u95f4\u7684\u6da8\u8dcc\u505c\u6807\u5fd7 !!!Note \u672c\u51fd\u6570\u8fd4\u56de\u7684\u5e8f\u5217\u5728\u80a1\u7968\u6709\u505c\u724c\u7684\u60c5\u51b5\u4e0b\uff0c\u5c06\u4e0d\u80fd\u4e0e[start, end]\u4e00\u4e00\u5bf9\u5e94\u3002 Args: code: \u4e2a\u80a1\u4ee3\u7801 start: \u5f00\u59cb\u65e5\u671f end: \u7ed3\u675f\u65e5\u671f Returns: \u6da8\u8dcc\u505c\u6807\u5fd7\u5217\u8868(buy, sell) \"\"\" cols = [ \"_time\" , \"close\" , \"high_limit\" , \"low_limit\" ] client = get_influx_client () measurement = cls . _measurement_name ( FrameType . DAY ) flux = ( Flux () . bucket ( client . _bucket ) . measurement ( measurement ) . range ( start , end ) . tags ({ \"code\" : code }) . fields ( cols ) . 
sort ( \"_time\" ) ) dtype = [ ( \"frame\" , \"O\" ), ( \"close\" , \"f4\" ), ( \"high_limit\" , \"f4\" ), ( \"low_limit\" , \"f4\" ), ] ds = NumpyDeserializer ( dtype , use_cols = [ \"_time\" , \"close\" , \"high_limit\" , \"low_limit\" ], converters = { \"_time\" : lambda x : ciso8601 . parse_datetime ( x ) . date ()}, # since we ask parse date in convertors, so we have to disable parse_date parse_date = None , ) result = await client . query ( flux , ds ) if result . size == 0 : return np . array ([], dtype = dtype ) return ( array_price_equal ( result [ \"close\" ], result [ \"high_limit\" ]), array_price_equal ( result [ \"close\" ], result [ \"low_limit\" ]), ) @classmethod async def trade_price_limit_flags_ex ( cls , code : str , start : datetime . date , end : datetime . date ) -> Dict [ datetime . date , Tuple [ bool , bool ]]: \"\"\"\u83b7\u53d6\u80a1\u7968`code`\u5728`[start, end]`\u533a\u95f4\u7684\u6da8\u8dcc\u505c\u6807\u5fd7 !!!Note \u5982\u679cend\u4e3a\u5f53\u5929\uff0c\u6ce8\u610f\u5728\u672a\u6536\u76d8\u4e4b\u524d\uff0c\u8fd9\u4e2a\u6da8\u8dcc\u505c\u6807\u5fd7\u90fd\u662f\u4e0d\u7a33\u5b9a\u7684 Args: code: \u80a1\u7968\u4ee3\u7801 start: \u8d77\u59cb\u65e5\u671f end: \u7ed3\u675f\u65e5\u671f Returns: \u4ee5\u65e5\u671f\u4e3akey\uff0c\uff08\u6da8\u505c\uff0c\u8dcc\u505c\uff09\u4e3a\u503c\u7684dict \"\"\" limit_prices = await cls . get_trade_price_limits ( code , start , end ) bars = await Stock . get_bars_in_range ( code , FrameType . DAY , start = start , end = end , fq = False ) close = bars [ \"close\" ] results = {} # aligned = True for i in range ( len ( bars )): if bars [ i ][ \"frame\" ] . item () . date () != limit_prices [ i ][ \"frame\" ]: # aligned = False logger . warning ( \"\u6570\u636e\u540c\u6b65\u9519\u8bef\uff0c\u6da8\u8dcc\u505c\u4ef7\u683c\u4e0e\u6536\u76d8\u4ef7\u65f6\u95f4\u4e0d\u4e00\u81f4: %s , %s \" , code , bars [ i ][ \"frame\" ]) break results [ limit_prices [ i ][ \"frame\" ]] = ( price_equal ( limit_prices [ i ][ \"high_limit\" ], close [ i ]), price_equal ( limit_prices [ i ][ \"low_limit\" ], close [ i ]), ) # if not aligned: # bars = bars[i:] # limit_prices = limit_prices[i:] # for frame in bars[\"frame\"]: # frame = frame.item().date() # close = bars[bars[\"frame\"].item().date() == frame][\"close\"].item() # high = limit_prices[limit_prices[\"frame\"] == frame][\"high_limit\"].item() # low = limit_prices[limit_prices[\"frame\"] == frame][\"low_limit\"].item() # results[frame] = ( # price_equal(high, close), # price_equal(low, close) # ) return results @classmethod async def get_latest_price ( cls , codes : Iterable [ str ]) -> List [ str ]: \"\"\"\u83b7\u53d6\u591a\u652f\u80a1\u7968\u7684\u6700\u65b0\u4ef7\u683c\uff08\u4ea4\u6613\u65e5\u5f53\u5929\uff09\uff0c\u6682\u4e0d\u5305\u62ec\u6307\u6570 \u4ef7\u683c\u6570\u636e\u6bcf5\u79d2\u66f4\u65b0\u4e00\u6b21\uff0c\u63a5\u53d7\u591a\u53ea\u80a1\u7968\u67e5\u8be2\uff0c\u8fd4\u56de\u6700\u540e\u7f13\u5b58\u7684\u4ef7\u683c Args: codes: \u4ee3\u7801\u5217\u8868 Returns: \u8fd4\u56de\u4e00\u4e2aList\uff0c\u4ef7\u683c\u662f\u5b57\u7b26\u5f62\u5f0f\u7684\u6d6e\u70b9\u6570\u3002 \"\"\" if not codes : return [] _raw_code_list = [] for code_str in codes : code , _ = code_str . split ( \".\" ) _raw_code_list . append ( code ) _converted_data = [] raw_data = await cache . feature . hmget ( TRADE_LATEST_PRICE , * _raw_code_list ) for _data in raw_data : if _data is None : _converted_data . append ( _data ) else : _converted_data . 
append ( float ( _data )) return _converted_data","title":"Stock"},{"location":"api/stock/#omicron.models.stock.Stock.security_type","text":"\u8fd4\u56de\u8bc1\u5238\u7c7b\u578b Returns: Type Description SecurityType [description]","title":"security_type"},{"location":"api/stock/#omicron.models.stock.Stock.batch_cache_bars","text":"\u7f13\u5b58\u5df2\u6536\u76d8\u7684\u5206\u949f\u7ebf\u548c\u65e5\u7ebf \u5f53\u7f13\u5b58\u65e5\u7ebf\u65f6\uff0c\u4ec5\u9650\u4e8e\u5f53\u65e5\u6536\u76d8\u540e\u7684\u7b2c\u4e00\u6b21\u540c\u6b65\u65f6\u8c03\u7528\u3002 Parameters: Name Type Description Default frame_type FrameType \u5e27\u7c7b\u578b required bars Dict[str, numpy.ndarray[Any, numpy.dtype[dtype([('frame', ' Generator [ Dict [ str , BarsArray ], None , None ]: \"\"\"\u83b7\u53d6\u591a\u652f\u80a1\u7968\uff08\u6307\u6570\uff09\u5728[start, end)\u65f6\u95f4\u6bb5\u5185\u7684\u884c\u60c5\u6570\u636e \u5982\u679c\u8981\u83b7\u53d6\u7684\u884c\u60c5\u6570\u636e\u662f\u65e5\u7ebf\u7ea7\u522b\uff08\u53731d, 1w, 1M)\uff0c\u4f7f\u7528\u672c\u63a5\u53e3\u3002 \u505c\u724c\u6570\u636e\u5904\u7406\u8bf7\u89c1[get_bars][omicron.models.stock.Stock.get_bars]\u3002 \u672c\u51fd\u6570\u8fd4\u56de\u4e00\u4e2a\u8fed\u4ee3\u5668\uff0c\u4f7f\u7528\u65b9\u6cd5\u793a\u4f8b\uff1a ``` async for code, bars in Stock.batch_get_day_level_bars_in_range(...): print(code, bars) ``` \u5982\u679c`end`\u4e0d\u5728`frame_type`\u6240\u5c5e\u7684\u8fb9\u754c\u70b9\u4e0a\uff0c\u90a3\u4e48\uff0c\u5982\u679c`end`\u5927\u4e8e\u7b49\u4e8e\u5f53\u524d\u7f13\u5b58\u672a\u6536\u76d8\u6570\u636e\u65f6\u95f4\uff0c\u5219\u5c06\u5305\u542b\u672a\u6536\u76d8\u6570\u636e\uff1b\u5426\u5219\uff0c\u8fd4\u56de\u7684\u8bb0\u5f55\u5c06\u622a\u6b62\u5230`tf.floor(end, frame_type)`\u3002 Args: codes: \u4ee3\u7801\u5217\u8868 frame_type: \u5e27\u7c7b\u578b start: \u8d77\u59cb\u65f6\u95f4 end: \u7ed3\u675f\u65f6\u95f4 fq: \u662f\u5426\u8fdb\u884c\u590d\u6743\uff0c\u5982\u679c\u662f\uff0c\u5219\u8fdb\u884c\u524d\u590d\u6743\u3002Defaults to True. Returns: Generator[Dict[str, BarsArray], None, None]: \u8fed\u4ee3\u5668\uff0c\u6bcf\u6b21\u8fd4\u56de\u4e00\u4e2a\u5b57\u5178\uff0c\u5176\u4e2dkey\u4e3a\u4ee3\u7801\uff0cvalue\u4e3a\u884c\u60c5\u6570\u636e \"\"\" today = datetime . datetime . now () . date () # \u65e5\u7ebf\uff0cend\u4e0d\u7b49\u4e8e\u6700\u540e\u4ea4\u6613\u65e5\uff0c\u6b64\u65f6\u5df2\u65e0\u7f13\u5b58 if frame_type == FrameType . DAY and end == tf . floor ( today , frame_type ): from_cache = True elif frame_type != FrameType . DAY and start > tf . floor ( today , frame_type ): from_cache = True else : from_cache = False n = tf . count_frames ( start , end , frame_type ) max_query_size = min ( cfg . influxdb . max_query_size , INFLUXDB_MAX_QUERY_SIZE ) batch_size = max ( max_query_size // n , 1 ) for i in range ( 0 , len ( codes ), batch_size ): batch_codes = codes [ i : i + batch_size ] persisted = await cls . _batch_get_persisted_bars_in_range ( batch_codes , frame_type , start , end ) if from_cache : cached = await cls . _batch_get_cached_bars_n ( frame_type , 1 , end , batch_codes ) cached = pd . DataFrame ( cached , columns = bars_dtype_with_code . names ) df = pd . concat ([ persisted , cached ]) else : df = persisted for code in batch_codes : filtered = df [ df [ \"code\" ] == code ][ bars_cols ] bars = filtered . to_records ( index = False ) . astype ( bars_dtype ) if fq : bars = cls . 
qfq ( bars ) yield code , bars","title":"batch_get_day_level_bars_in_range()"},{"location":"api/stock/#omicron.models.stock.Stock.batch_get_min_level_bars_in_range","text":"\u83b7\u53d6\u591a\u652f\u80a1\u7968\uff08\u6307\u6570\uff09\u5728[start, end)\u65f6\u95f4\u6bb5\u5185\u7684\u884c\u60c5\u6570\u636e \u5982\u679c\u8981\u83b7\u53d6\u7684\u884c\u60c5\u6570\u636e\u662f\u5206\u949f\u7ea7\u522b\uff08\u53731m, 5m, 15m, 30m\u548c60m)\uff0c\u4f7f\u7528\u672c\u63a5\u53e3\u3002 \u505c\u724c\u6570\u636e\u5904\u7406\u8bf7\u89c1 get_bars \u3002 \u672c\u51fd\u6570\u8fd4\u56de\u4e00\u4e2a\u8fed\u4ee3\u5668\uff0c\u4f7f\u7528\u65b9\u6cd5\u793a\u4f8b\uff1a 1 2 async for code, bars in Stock.batch_get_min_level_bars_in_range(...): print(code, bars) \u5982\u679c end \u4e0d\u5728 frame_type \u6240\u5c5e\u7684\u8fb9\u754c\u70b9\u4e0a\uff0c\u90a3\u4e48\uff0c\u5982\u679c end \u5927\u4e8e\u7b49\u4e8e\u5f53\u524d\u7f13\u5b58\u672a\u6536\u76d8\u6570\u636e\u65f6\u95f4\uff0c\u5219\u5c06\u5305\u542b\u672a\u6536\u76d8\u6570\u636e\uff1b\u5426\u5219\uff0c\u8fd4\u56de\u7684\u8bb0\u5f55\u5c06\u622a\u6b62\u5230 tf.floor(end, frame_type) \u3002 Parameters: Name Type Description Default codes List[str] \u80a1\u7968/\u6307\u6570\u4ee3\u7801\u5217\u8868 required frame_type FrameType \u5e27\u7c7b\u578b required start Union[datetime.date, datetime.datetime] \u8d77\u59cb\u65f6\u95f4 required end Union[datetime.date, datetime.datetime] \u7ed3\u675f\u65f6\u95f4\u3002\u5982\u679c\u672a\u6307\u660e\uff0c\u5219\u53d6\u5f53\u524d\u65f6\u95f4\u3002 required fq bool \u662f\u5426\u8fdb\u884c\u590d\u6743\uff0c\u5982\u679c\u662f\uff0c\u5219\u8fdb\u884c\u524d\u590d\u6743\u3002Defaults to True. True Returns: Type Description Generator[Dict[str, BarsArray], None, None] \u8fed\u4ee3\u5668\uff0c\u6bcf\u6b21\u8fd4\u56de\u4e00\u4e2a\u5b57\u5178\uff0c\u5176\u4e2dkey\u4e3a\u4ee3\u7801\uff0cvalue\u4e3a\u884c\u60c5\u6570\u636e Source code in omicron/models/stock.py @classmethod async def batch_get_min_level_bars_in_range ( cls , codes : List [ str ], frame_type : FrameType , start : Frame , end : Frame , fq : bool = True , ) -> Generator [ Dict [ str , BarsArray ], None , None ]: \"\"\"\u83b7\u53d6\u591a\u652f\u80a1\u7968\uff08\u6307\u6570\uff09\u5728[start, end)\u65f6\u95f4\u6bb5\u5185\u7684\u884c\u60c5\u6570\u636e \u5982\u679c\u8981\u83b7\u53d6\u7684\u884c\u60c5\u6570\u636e\u662f\u5206\u949f\u7ea7\u522b\uff08\u53731m, 5m, 15m, 30m\u548c60m)\uff0c\u4f7f\u7528\u672c\u63a5\u53e3\u3002 \u505c\u724c\u6570\u636e\u5904\u7406\u8bf7\u89c1[get_bars][omicron.models.stock.Stock.get_bars]\u3002 \u672c\u51fd\u6570\u8fd4\u56de\u4e00\u4e2a\u8fed\u4ee3\u5668\uff0c\u4f7f\u7528\u65b9\u6cd5\u793a\u4f8b\uff1a ``` async for code, bars in Stock.batch_get_min_level_bars_in_range(...): print(code, bars) ``` \u5982\u679c`end`\u4e0d\u5728`frame_type`\u6240\u5c5e\u7684\u8fb9\u754c\u70b9\u4e0a\uff0c\u90a3\u4e48\uff0c\u5982\u679c`end`\u5927\u4e8e\u7b49\u4e8e\u5f53\u524d\u7f13\u5b58\u672a\u6536\u76d8\u6570\u636e\u65f6\u95f4\uff0c\u5219\u5c06\u5305\u542b\u672a\u6536\u76d8\u6570\u636e\uff1b\u5426\u5219\uff0c\u8fd4\u56de\u7684\u8bb0\u5f55\u5c06\u622a\u6b62\u5230`tf.floor(end, frame_type)`\u3002 Args: codes: \u80a1\u7968/\u6307\u6570\u4ee3\u7801\u5217\u8868 frame_type: \u5e27\u7c7b\u578b start: \u8d77\u59cb\u65f6\u95f4 end: \u7ed3\u675f\u65f6\u95f4\u3002\u5982\u679c\u672a\u6307\u660e\uff0c\u5219\u53d6\u5f53\u524d\u65f6\u95f4\u3002 fq: \u662f\u5426\u8fdb\u884c\u590d\u6743\uff0c\u5982\u679c\u662f\uff0c\u5219\u8fdb\u884c\u524d\u590d\u6743\u3002Defaults to True. 
Returns: Generator[Dict[str, BarsArray], None, None]: \u8fed\u4ee3\u5668\uff0c\u6bcf\u6b21\u8fd4\u56de\u4e00\u4e2a\u5b57\u5178\uff0c\u5176\u4e2dkey\u4e3a\u4ee3\u7801\uff0cvalue\u4e3a\u884c\u60c5\u6570\u636e \"\"\" closed_end = tf . floor ( end , frame_type ) n = tf . count_frames ( start , closed_end , frame_type ) max_query_size = min ( cfg . influxdb . max_query_size , INFLUXDB_MAX_QUERY_SIZE ) batch_size = max ( 1 , max_query_size // n ) ff = tf . first_min_frame ( datetime . datetime . now (), frame_type ) for i in range ( 0 , len ( codes ), batch_size ): batch_codes = codes [ i : i + batch_size ] if end < ff : part1 = await cls . _batch_get_persisted_bars_in_range ( batch_codes , frame_type , start , end ) part2 = pd . DataFrame ([], columns = bars_dtype_with_code . names ) elif start >= ff : part1 = pd . DataFrame ([], columns = bars_dtype_with_code . names ) n = tf . count_frames ( start , closed_end , frame_type ) + 1 cached = await cls . _batch_get_cached_bars_n ( frame_type , n , end , batch_codes ) cached = cached [ cached [ \"frame\" ] >= start ] part2 = pd . DataFrame ( cached , columns = bars_dtype_with_code . names ) else : part1 = await cls . _batch_get_persisted_bars_in_range ( batch_codes , frame_type , start , ff ) n = tf . count_frames ( start , closed_end , frame_type ) + 1 cached = await cls . _batch_get_cached_bars_n ( frame_type , n , end , batch_codes ) part2 = pd . DataFrame ( cached , columns = bars_dtype_with_code . names ) df = pd . concat ([ part1 , part2 ]) for code in batch_codes : filtered = df [ df [ \"code\" ] == code ][ bars_cols ] bars = filtered . to_records ( index = False ) . astype ( bars_dtype ) if fq : bars = cls . qfq ( bars ) yield code , bars","title":"batch_get_min_level_bars_in_range()"},{"location":"api/stock/#omicron.models.stock.Stock.cache_bars","text":"\u5c06\u5f53\u671f\u5df2\u6536\u76d8\u7684\u884c\u60c5\u6570\u636e\u7f13\u5b58 Note \u5f53\u524d\u53ea\u7f13\u5b581\u5206\u949f\u6570\u636e\u3002\u5176\u5b83\u5206\u949f\u6570\u636e\uff0c\u90fd\u5728\u8c03\u7528\u65f6\uff0c\u901a\u8fc7resample\u4e34\u65f6\u5408\u6210\u3002 \u884c\u60c5\u6570\u636e\u7f13\u5b58\u5728\u4ee5 bars:{frame_type.value}:{code} \u4e3akey, {frame}\u4e3afield\u7684hashmap\u4e2d\u3002 Parameters: Name Type Description Default code str the full qualified code of a security or index required frame_type FrameType frame type of the bars required bars numpy.ndarray[Any, numpy.dtype[dtype([('frame', ' int : \"\"\"\u83b7\u53d6\u4e0a\u5e02\u4ee5\u6765\u7ecf\u8fc7\u4e86\u591a\u5c11\u4e2a\u4ea4\u6613\u65e5 \u7531\u4e8e\u53d7\u4ea4\u6613\u65e5\u5386\u9650\u5236\uff082005\u5e741\u67084\u65e5\u4e4b\u524d\u7684\u4ea4\u6613\u65e5\u5386\u6ca1\u6709\uff09\uff0c\u5bf9\u4e8e\u5728\u4e4b\u524d\u4e0a\u5e02\u7684\u54c1\u79cd\uff0c\u90fd\u8fd4\u56de\u4ece2005\u5e741\u67084\u65e5\u8d77\u7684\u65e5\u671f\u3002 Returns: int: [description] \"\"\" epoch_start = arrow . get ( \"2005-01-04\" ) . date () ipo_day = self . ipo_date if self . ipo_date > epoch_start else epoch_start return tf . count_day_frames ( ipo_day , arrow . now () . 
date ())","title":"days_since_ipo()"},{"location":"api/stock/#omicron.models.stock.Stock.format_code","text":"\u65b0\u4e09\u677f\u548c\u5317\u4ea4\u6240\u7684\u80a1\u7968, \u6682\u4e0d\u652f\u6301, \u9ed8\u8ba4\u8fd4\u56deNone \u4e0a\u8bc1A\u80a1: 600\u3001601\u3001603\u3001605 \u6df1\u8bc1A\u80a1: 000\u3001001 \u4e2d\u5c0f\u677f: 002\u3001003 \u521b\u4e1a\u677f: 300/301 \u79d1\u521b\u677f: 688 \u65b0\u4e09\u677f: 82\u300183\u300187\u300188\u3001430\u3001420\u3001400 \u5317\u4ea4\u6240: 43\u300183\u300187\u300188 Source code in omicron/models/stock.py @staticmethod def format_code ( code ) -> str : \"\"\"\u65b0\u4e09\u677f\u548c\u5317\u4ea4\u6240\u7684\u80a1\u7968, \u6682\u4e0d\u652f\u6301, \u9ed8\u8ba4\u8fd4\u56deNone \u4e0a\u8bc1A\u80a1: 600\u3001601\u3001603\u3001605 \u6df1\u8bc1A\u80a1: 000\u3001001 \u4e2d\u5c0f\u677f: 002\u3001003 \u521b\u4e1a\u677f: 300/301 \u79d1\u521b\u677f: 688 \u65b0\u4e09\u677f: 82\u300183\u300187\u300188\u3001430\u3001420\u3001400 \u5317\u4ea4\u6240: 43\u300183\u300187\u300188 \"\"\" if not code or len ( code ) != 6 : return None prefix = code [ 0 ] if prefix in ( \"0\" , \"3\" ): return f \" { code } .XSHE\" elif prefix == \"6\" : return f \" { code } .XSHG\" else : return None","title":"format_code()"},{"location":"api/stock/#omicron.models.stock.Stock.fuzzy_match","text":"\u5bf9\u80a1\u7968/\u6307\u6570\u8fdb\u884c\u6a21\u7cca\u5339\u914d\u67e5\u627e query\u53ef\u4ee5\u662f\u80a1\u7968/\u6307\u6570\u4ee3\u7801\uff0c\u4e5f\u53ef\u4ee5\u662f\u5b57\u6bcd\uff08\u6309name\u67e5\u627e\uff09\uff0c\u4e5f\u53ef\u4ee5\u662f\u6c49\u5b57\uff08\u6309\u663e\u793a\u540d\u67e5\u627e\uff09 Parameters: Name Type Description Default query str \u67e5\u8be2\u5b57\u7b26\u4e32 required Returns: Type Description Dict[str, Tuple] \u67e5\u8be2\u7ed3\u679c\uff0c\u5176\u4e2dTuple\u4e3a(code, display_name, name, start, end, type) Source code in omicron/models/stock.py @classmethod def fuzzy_match ( cls , query : str ) -> Dict [ str , Tuple ]: \"\"\"\u5bf9\u80a1\u7968/\u6307\u6570\u8fdb\u884c\u6a21\u7cca\u5339\u914d\u67e5\u627e query\u53ef\u4ee5\u662f\u80a1\u7968/\u6307\u6570\u4ee3\u7801\uff0c\u4e5f\u53ef\u4ee5\u662f\u5b57\u6bcd\uff08\u6309name\u67e5\u627e\uff09\uff0c\u4e5f\u53ef\u4ee5\u662f\u6c49\u5b57\uff08\u6309\u663e\u793a\u540d\u67e5\u627e\uff09 Args: query (str): \u67e5\u8be2\u5b57\u7b26\u4e32 Returns: Dict[str, Tuple]: \u67e5\u8be2\u7ed3\u679c\uff0c\u5176\u4e2dTuple\u4e3a(code, display_name, name, start, end, type) \"\"\" query = query . upper () if re . match ( r \"\\d+\" , query ): return { sec [ \"code\" ]: sec . tolist () for sec in cls . _stocks if sec [ \"code\" ] . startswith ( query ) } elif re . match ( r \"[A-Z]+\" , query ): return { sec [ \"code\" ]: sec . tolist () for sec in cls . _stocks if sec [ \"name\" ] . startswith ( query ) } else : return { sec [ \"code\" ]: sec . tolist () for sec in cls . _stocks if sec [ \"alias\" ] . 
find ( query ) != - 1 }","title":"fuzzy_match()"},{"location":"api/stock/#omicron.models.stock.Stock.get_bars","text":"\u83b7\u53d6\u5230 end \u4e3a\u6b62\u7684 n \u4e2a\u884c\u60c5\u6570\u636e\u3002 \u8fd4\u56de\u7684\u6570\u636e\u662f\u6309\u7167\u65f6\u95f4\u987a\u5e8f\u9012\u589e\u6392\u5e8f\u7684\u3002\u5728\u9047\u5230\u505c\u724c\u7684\u60c5\u51b5\u65f6\uff0c\u8be5\u65f6\u6bb5\u6570\u636e\u5c06\u88ab\u8df3\u8fc7\uff0c\u56e0\u6b64\u8fd4\u56de\u7684\u8bb0\u5f55\u53ef\u80fd\u4e0d\u662f\u4ea4\u6613\u65e5\u8fde\u7eed\u7684\uff0c\u5e76\u4e14\u53ef\u80fd\u4e0d\u8db3 n \u4e2a\u3002 \u5982\u679c\u7cfb\u7edf\u5f53\u524d\u6ca1\u6709\u5230\u6307\u5b9a\u65f6\u95f4 end \u7684\u6570\u636e\uff0c\u5c06\u5c3d\u6700\u5927\u52aa\u529b\u8fd4\u56de\u6570\u636e\u3002\u8c03\u7528\u8005\u53ef\u4ee5\u901a\u8fc7\u5224\u65ad\u6700\u540e\u4e00\u6761\u6570\u636e\u7684\u65f6\u95f4\u662f\u5426\u7b49\u4e8e end \u6765\u5224\u65ad\u662f\u5426\u83b7\u53d6\u5230\u4e86\u5168\u90e8\u6570\u636e\u3002 Parameters: Name Type Description Default code str \u8bc1\u5238\u4ee3\u7801 required n int \u8bb0\u5f55\u6570 required frame_type FrameType \u5e27\u7c7b\u578b required end Union[datetime.date, datetime.datetime] \u622a\u6b62\u65f6\u95f4,\u5982\u679c\u672a\u6307\u660e\uff0c\u5219\u53d6\u5f53\u524d\u65f6\u95f4 None fq \u662f\u5426\u5bf9\u8fd4\u56de\u8bb0\u5f55\u8fdb\u884c\u590d\u6743\u3002\u5982\u679c\u4e3a True \u7684\u8bdd\uff0c\u5219\u8fdb\u884c\u524d\u590d\u6743\u3002Defaults to True. True unclosed \u662f\u5426\u5305\u542b\u6700\u65b0\u672a\u6536\u76d8\u7684\u6570\u636e\uff1f Defaults to True. True Returns: Type Description numpy.ndarray[Any, numpy.dtype[dtype([('frame', ' BarsArray : \"\"\"\u83b7\u53d6\u5230`end`\u4e3a\u6b62\u7684`n`\u4e2a\u884c\u60c5\u6570\u636e\u3002 \u8fd4\u56de\u7684\u6570\u636e\u662f\u6309\u7167\u65f6\u95f4\u987a\u5e8f\u9012\u589e\u6392\u5e8f\u7684\u3002\u5728\u9047\u5230\u505c\u724c\u7684\u60c5\u51b5\u65f6\uff0c\u8be5\u65f6\u6bb5\u6570\u636e\u5c06\u88ab\u8df3\u8fc7\uff0c\u56e0\u6b64\u8fd4\u56de\u7684\u8bb0\u5f55\u53ef\u80fd\u4e0d\u662f\u4ea4\u6613\u65e5\u8fde\u7eed\u7684\uff0c\u5e76\u4e14\u53ef\u80fd\u4e0d\u8db3`n`\u4e2a\u3002 \u5982\u679c\u7cfb\u7edf\u5f53\u524d\u6ca1\u6709\u5230\u6307\u5b9a\u65f6\u95f4`end`\u7684\u6570\u636e\uff0c\u5c06\u5c3d\u6700\u5927\u52aa\u529b\u8fd4\u56de\u6570\u636e\u3002\u8c03\u7528\u8005\u53ef\u4ee5\u901a\u8fc7\u5224\u65ad\u6700\u540e\u4e00\u6761\u6570\u636e\u7684\u65f6\u95f4\u662f\u5426\u7b49\u4e8e`end`\u6765\u5224\u65ad\u662f\u5426\u83b7\u53d6\u5230\u4e86\u5168\u90e8\u6570\u636e\u3002 Args: code: \u8bc1\u5238\u4ee3\u7801 n: \u8bb0\u5f55\u6570 frame_type: \u5e27\u7c7b\u578b end: \u622a\u6b62\u65f6\u95f4,\u5982\u679c\u672a\u6307\u660e\uff0c\u5219\u53d6\u5f53\u524d\u65f6\u95f4 fq: \u662f\u5426\u5bf9\u8fd4\u56de\u8bb0\u5f55\u8fdb\u884c\u590d\u6743\u3002\u5982\u679c\u4e3a`True`\u7684\u8bdd\uff0c\u5219\u8fdb\u884c\u524d\u590d\u6743\u3002Defaults to True. unclosed: \u662f\u5426\u5305\u542b\u6700\u65b0\u672a\u6536\u76d8\u7684\u6570\u636e\uff1f Defaults to True. Returns: \u8fd4\u56dedtype\u4e3a`coretypes.bars_dtype`\u7684\u4e00\u7ef4numpy\u6570\u7ec4\u3002 \"\"\" now = datetime . datetime . now () try : cached = np . array ([], dtype = bars_dtype ) if frame_type in tf . day_level_frames : if end is None : end = now . date () elif type ( end ) == datetime . datetime : end = end . date () n0 = n if unclosed : cached = await cls . _get_cached_bars_n ( code , 1 , frame_type ) if cached . 
size > 0 : # \u5982\u679c\u7f13\u5b58\u7684\u672a\u6536\u76d8\u65e5\u671f > end\uff0c\u5219\u8be5\u7f13\u5b58\u4e0d\u662f\u9700\u8981\u7684 if cached [ 0 ][ \"frame\" ] . item () . date () > end : cached = np . array ([], dtype = bars_dtype ) else : n0 = n - 1 else : end = end or now closed_frame = tf . floor ( end , frame_type ) # fetch one more bar, in case we should discard unclosed bar cached = await cls . _get_cached_bars_n ( code , n + 1 , frame_type , end ) if not unclosed : cached = cached [ cached [ \"frame\" ] <= closed_frame ] # n bars we need fetch from persisted db n0 = n - cached . size if n0 > 0 : if cached . size > 0 : end0 = cached [ 0 ][ \"frame\" ] . item () else : end0 = end bars = await cls . _get_persisted_bars_n ( code , frame_type , n0 , end0 ) merged = np . concatenate (( bars , cached )) bars = merged [ - n :] else : bars = cached [ - n :] if fq : bars = cls . qfq ( bars ) return bars except Exception as e : logger . exception ( e ) logger . warning ( \"failed to get bars for %s , %s , %s , %s \" , code , n , frame_type , end ) raise","title":"get_bars()"},{"location":"api/stock/#omicron.models.stock.Stock.get_bars_in_range","text":"\u83b7\u53d6\u6307\u5b9a\u8bc1\u5238\uff08 code \uff09\u5728[ start , end ]\u671f\u95f4\u5e27\u7c7b\u578b\u4e3a frame_type \u7684\u884c\u60c5\u6570\u636e\u3002 Parameters: Name Type Description Default code \u8bc1\u5238\u4ee3\u7801 required frame_type \u884c\u60c5\u6570\u636e\u7684\u5e27\u7c7b\u578b required start \u8d77\u59cb\u65f6\u95f4 required end \u7ed3\u675f\u65f6\u95f4,\u5982\u679c\u4e3aNone\uff0c\u5219\u8868\u660e\u53d6\u5230\u5f53\u524d\u65f6\u95f4\u3002 None fq \u662f\u5426\u5bf9\u884c\u60c5\u6570\u636e\u6267\u884c\u524d\u590d\u6743\u64cd\u4f5c True unclosed \u662f\u5426\u5305\u542b\u672a\u6536\u76d8\u7684\u6570\u636e True Source code in omicron/models/stock.py @classmethod async def get_bars_in_range ( cls , code : str , frame_type : FrameType , start : Frame , end : Frame = None , fq = True , unclosed = True , ) -> BarsArray : \"\"\"\u83b7\u53d6\u6307\u5b9a\u8bc1\u5238\uff08`code`\uff09\u5728[`start`, `end`]\u671f\u95f4\u5e27\u7c7b\u578b\u4e3a`frame_type`\u7684\u884c\u60c5\u6570\u636e\u3002 Args: code : \u8bc1\u5238\u4ee3\u7801 frame_type : \u884c\u60c5\u6570\u636e\u7684\u5e27\u7c7b\u578b start : \u8d77\u59cb\u65f6\u95f4 end : \u7ed3\u675f\u65f6\u95f4,\u5982\u679c\u4e3aNone\uff0c\u5219\u8868\u660e\u53d6\u5230\u5f53\u524d\u65f6\u95f4\u3002 fq : \u662f\u5426\u5bf9\u884c\u60c5\u6570\u636e\u6267\u884c\u524d\u590d\u6743\u64cd\u4f5c unclosed : \u662f\u5426\u5305\u542b\u672a\u6536\u76d8\u7684\u6570\u636e \"\"\" now = datetime . datetime . now () if frame_type in tf . day_level_frames : end = end or now . date () if unclosed and tf . day_shift ( end , 0 ) == now . date (): part2 = await cls . _get_cached_bars_n ( code , 1 , frame_type ) else : part2 = np . array ([], dtype = bars_dtype ) # get rest from persisted part1 = await cls . _get_persisted_bars_in_range ( code , frame_type , start , end ) bars = np . concatenate (( part1 , part2 )) else : end = end or now closed_end = tf . floor ( end , frame_type ) ff_min1 = tf . first_min_frame ( now , FrameType . MIN1 ) if tf . day_shift ( end , 0 ) < now . date () or end < ff_min1 : part1 = await cls . _get_persisted_bars_in_range ( code , frame_type , start , end ) part2 = np . array ([], dtype = bars_dtype ) elif start >= ff_min1 : # all in cache part1 = np . array ([], dtype = bars_dtype ) n = tf . count_frames ( start , closed_end , frame_type ) + 1 part2 = await cls . 
_get_cached_bars_n ( code , n , frame_type , end ) part2 = part2 [ part2 [ \"frame\" ] >= start ] else : # in both cache and persisted ff = tf . first_min_frame ( now , frame_type ) part1 = await cls . _get_persisted_bars_in_range ( code , frame_type , start , ff ) n = tf . count_frames ( ff , closed_end , frame_type ) + 1 part2 = await cls . _get_cached_bars_n ( code , n , frame_type , end ) if not unclosed : part2 = part2 [ part2 [ \"frame\" ] <= closed_end ] bars = np . concatenate (( part1 , part2 )) if fq : return cls . qfq ( bars ) else : return bars","title":"get_bars_in_range()"},{"location":"api/stock/#omicron.models.stock.Stock.get_latest_price","text":"\u83b7\u53d6\u591a\u652f\u80a1\u7968\u7684\u6700\u65b0\u4ef7\u683c\uff08\u4ea4\u6613\u65e5\u5f53\u5929\uff09\uff0c\u6682\u4e0d\u5305\u62ec\u6307\u6570 \u4ef7\u683c\u6570\u636e\u6bcf5\u79d2\u66f4\u65b0\u4e00\u6b21\uff0c\u63a5\u53d7\u591a\u53ea\u80a1\u7968\u67e5\u8be2\uff0c\u8fd4\u56de\u6700\u540e\u7f13\u5b58\u7684\u4ef7\u683c Parameters: Name Type Description Default codes Iterable[str] \u4ee3\u7801\u5217\u8868 required Returns: Type Description List[str] \u8fd4\u56de\u4e00\u4e2aList\uff0c\u4ef7\u683c\u662f\u5b57\u7b26\u5f62\u5f0f\u7684\u6d6e\u70b9\u6570\u3002 Source code in omicron/models/stock.py @classmethod async def get_latest_price ( cls , codes : Iterable [ str ]) -> List [ str ]: \"\"\"\u83b7\u53d6\u591a\u652f\u80a1\u7968\u7684\u6700\u65b0\u4ef7\u683c\uff08\u4ea4\u6613\u65e5\u5f53\u5929\uff09\uff0c\u6682\u4e0d\u5305\u62ec\u6307\u6570 \u4ef7\u683c\u6570\u636e\u6bcf5\u79d2\u66f4\u65b0\u4e00\u6b21\uff0c\u63a5\u53d7\u591a\u53ea\u80a1\u7968\u67e5\u8be2\uff0c\u8fd4\u56de\u6700\u540e\u7f13\u5b58\u7684\u4ef7\u683c Args: codes: \u4ee3\u7801\u5217\u8868 Returns: \u8fd4\u56de\u4e00\u4e2aList\uff0c\u4ef7\u683c\u662f\u5b57\u7b26\u5f62\u5f0f\u7684\u6d6e\u70b9\u6570\u3002 \"\"\" if not codes : return [] _raw_code_list = [] for code_str in codes : code , _ = code_str . split ( \".\" ) _raw_code_list . append ( code ) _converted_data = [] raw_data = await cache . feature . hmget ( TRADE_LATEST_PRICE , * _raw_code_list ) for _data in raw_data : if _data is None : _converted_data . append ( _data ) else : _converted_data . 
append ( float ( _data )) return _converted_data","title":"get_latest_price()"},{"location":"api/stock/#omicron.models.stock.Stock.get_trade_price_limits","text":"\u4eceinfluxdb\u548ccache\u4e2d\u83b7\u53d6\u4e2a\u80a1\u5728[begin, end]\u4e4b\u95f4\u7684\u6da8\u8dcc\u505c\u4ef7\u3002 \u6da8\u8dcc\u505c\u4ef7\u53ea\u6709\u65e5\u7ebf\u6570\u636e\u624d\u6709\uff0c\u56e0\u6b64\uff0cFrameType\u56fa\u5b9a\u4e3aFrameType.DAY\uff0c \u5f53\u5929\u7684\u6570\u636e\u5b58\u653e\u4e8eredis\uff0c\u5982\u679c\u67e5\u8be2\u65e5\u671f\u5305\u542b\u5f53\u5929\uff08\u4ea4\u6613\u65e5\uff09\uff0c\u4ececache\u4e2d\u8bfb\u53d6\u5e76\u8ffd\u52a0\u5230\u7ed3\u679c\u4e2d Parameters: Name Type Description Default code \u4e2a\u80a1\u4ee3\u7801 required begin \u5f00\u59cb\u65e5\u671f required end \u7ed3\u675f\u65e5\u671f required Returns: Type Description numpy.ndarray[Any, numpy.dtype[dtype([('frame', ' BarsArray : \"\"\"\u4eceinfluxdb\u548ccache\u4e2d\u83b7\u53d6\u4e2a\u80a1\u5728[begin, end]\u4e4b\u95f4\u7684\u6da8\u8dcc\u505c\u4ef7\u3002 \u6da8\u8dcc\u505c\u4ef7\u53ea\u6709\u65e5\u7ebf\u6570\u636e\u624d\u6709\uff0c\u56e0\u6b64\uff0cFrameType\u56fa\u5b9a\u4e3aFrameType.DAY\uff0c \u5f53\u5929\u7684\u6570\u636e\u5b58\u653e\u4e8eredis\uff0c\u5982\u679c\u67e5\u8be2\u65e5\u671f\u5305\u542b\u5f53\u5929\uff08\u4ea4\u6613\u65e5\uff09\uff0c\u4ececache\u4e2d\u8bfb\u53d6\u5e76\u8ffd\u52a0\u5230\u7ed3\u679c\u4e2d Args: code : \u4e2a\u80a1\u4ee3\u7801 begin : \u5f00\u59cb\u65e5\u671f end : \u7ed3\u675f\u65e5\u671f Returns: dtype\u4e3a[('frame', 'O'), ('high_limit', 'f4'), ('low_limit', 'f4')]\u7684numpy\u6570\u7ec4 \"\"\" cols = [ \"_time\" , \"high_limit\" , \"low_limit\" ] dtype = [( \"frame\" , \"O\" ), ( \"high_limit\" , \"f4\" ), ( \"low_limit\" , \"f4\" )] if isinstance ( begin , datetime . datetime ): begin = begin . date () # \u5f3a\u5236\u8f6c\u6362\u4e3adate if isinstance ( end , datetime . datetime ): end = end . date () # \u5f3a\u5236\u8f6c\u6362\u4e3adate data_in_cache = await cls . _get_price_limit_in_cache ( code , begin , end ) client = get_influx_client () measurement = cls . _measurement_name ( FrameType . DAY ) flux = ( Flux () . bucket ( client . _bucket ) . measurement ( measurement ) . range ( begin , end ) . tags ({ \"code\" : code }) . fields ( cols ) . sort ( \"_time\" ) ) ds = NumpyDeserializer ( dtype , use_cols = cols , converters = { \"_time\" : lambda x : ciso8601 . parse_datetime ( x ) . date ()}, # since we ask parse date in convertors, so we have to disable parse_date parse_date = None , ) result = await client . query ( flux , ds ) if data_in_cache : result = np . concatenate ([ result , data_in_cache ]) return result","title":"get_trade_price_limits()"},{"location":"api/stock/#omicron.models.stock.Stock.persist_bars","text":"\u5c06\u884c\u60c5\u6570\u636e\u6301\u4e45\u5316 \u5982\u679c bars \u7c7b\u578b\u4e3aDict,\u5219key\u4e3a code \uff0cvalue\u4e3a bars \u3002\u5982\u679c\u5176\u7c7b\u578b\u4e3aBarsArray\u6216\u8005pd.DataFrame\uff0c\u5219 bars \u5404\u5217\u5b57\u6bb5\u5e94\u8be5\u4e3a coretypes.bars_dtype + (\"code\", \"O\")\u6784\u6210\u3002 Parameters: Name Type Description Default frame_type FrameType the frame type of the bars required bars Union[Dict[str, numpy.ndarray[Any, numpy.dtype[dtype([('frame', ' BarsArray : \"\"\"\u5bf9\u884c\u60c5\u6570\u636e\u6267\u884c\u524d\u590d\u6743\u64cd\u4f5c\"\"\" # todo: \u8fd9\u91cc\u53ef\u4ee5\u4f18\u5316 if bars . 
size == 0 : return bars last = bars [ - 1 ][ \"factor\" ] for field in [ \"open\" , \"high\" , \"low\" , \"close\" , \"volume\" ]: bars [ field ] = bars [ field ] * ( bars [ \"factor\" ] / last ) return bars","title":"qfq()"},{"location":"api/stock/#omicron.models.stock.Stock.resample","text":"\u5c06\u539f\u6765\u4e3a from_frame \u7684\u884c\u60c5\u6570\u636e\u8f6c\u6362\u4e3a to_frame \u7684\u884c\u60c5\u6570\u636e \u5982\u679c to_frame \u4e3a\u65e5\u7ebf\u6216\u8005\u5206\u949f\u7ea7\u522b\u7ebf\uff0c\u5219 from_frame \u5fc5\u987b\u4e3a\u5206\u949f\u7ebf\uff1b\u5982\u679c to_frame \u4e3a\u5468\u4ee5\u4e0a\u7ea7\u522b\u7ebf\uff0c\u5219 from_frame \u5fc5\u987b\u4e3a\u65e5\u7ebf\u3002\u5176\u5b83\u7ea7\u522b\u4e4b\u95f4\u7684\u8f6c\u6362\u4e0d\u652f\u6301\u3002 \u5982\u679c from_frame \u4e3a1\u5206\u949f\u7ebf\uff0c\u5219\u5fc5\u987b\u4ece9\uff1a31\u8d77\u3002 Parameters: Name Type Description Default bars BarsArray \u884c\u60c5\u6570\u636e required from_frame FrameType \u8f6c\u6362\u524d\u7684FrameType required to_frame FrameType \u8f6c\u6362\u540e\u7684FrameType required Returns: Type Description BarsArray \u8f6c\u6362\u540e\u7684\u884c\u60c5\u6570\u636e Source code in omicron/models/stock.py @classmethod def resample ( cls , bars : BarsArray , from_frame : FrameType , to_frame : FrameType ) -> BarsArray : \"\"\"\u5c06\u539f\u6765\u4e3a`from_frame`\u7684\u884c\u60c5\u6570\u636e\u8f6c\u6362\u4e3a`to_frame`\u7684\u884c\u60c5\u6570\u636e \u5982\u679c`to_frame`\u4e3a\u65e5\u7ebf\u6216\u8005\u5206\u949f\u7ea7\u522b\u7ebf\uff0c\u5219`from_frame`\u5fc5\u987b\u4e3a\u5206\u949f\u7ebf\uff1b\u5982\u679c`to_frame`\u4e3a\u5468\u4ee5\u4e0a\u7ea7\u522b\u7ebf\uff0c\u5219`from_frame`\u5fc5\u987b\u4e3a\u65e5\u7ebf\u3002\u5176\u5b83\u7ea7\u522b\u4e4b\u95f4\u7684\u8f6c\u6362\u4e0d\u652f\u6301\u3002 \u5982\u679c`from_frame`\u4e3a1\u5206\u949f\u7ebf\uff0c\u5219\u5fc5\u987b\u4ece9\uff1a31\u8d77\u3002 Args: bars (BarsArray): \u884c\u60c5\u6570\u636e from_frame (FrameType): \u8f6c\u6362\u524d\u7684FrameType to_frame (FrameType): \u8f6c\u6362\u540e\u7684FrameType Returns: BarsArray: \u8f6c\u6362\u540e\u7684\u884c\u60c5\u6570\u636e \"\"\" if from_frame == FrameType . MIN1 : return cls . _resample_from_min1 ( bars , to_frame ) elif from_frame == FrameType . DAY : # pragma: no cover return cls . _resample_from_day ( bars , to_frame ) else : # pragma: no cover raise TypeError ( f \"unsupported from_frame: { from_frame } \" )","title":"resample()"},{"location":"api/stock/#omicron.models.stock.Stock.reset_cache","text":"\u6e05\u9664\u7f13\u5b58\u7684\u884c\u60c5\u6570\u636e Source code in omicron/models/stock.py @classmethod async def reset_cache ( cls ): \"\"\"\u6e05\u9664\u7f13\u5b58\u7684\u884c\u60c5\u6570\u636e\"\"\" try : for ft in itertools . chain ( tf . minute_level_frames , tf . day_level_frames ): keys = await cache . security . keys ( f \"bars: { ft . value } :*\" ) if keys : await cache . security . delete ( * keys ) finally : cls . 
_is_cache_empty = True","title":"reset_cache()"},{"location":"api/stock/#omicron.models.stock.Stock.save_trade_price_limits","text":"\u4fdd\u5b58\u6da8\u8dcc\u505c\u4ef7 Parameters: Name Type Description Default price_limits numpy.ndarray[Any, numpy.dtype[dtype([('frame', 'O'), ('code', 'O'), ('high_limit', ' Tuple [ List [ bool ]]: \"\"\"\u83b7\u53d6\u4e2a\u80a1\u5728[start, end]\u4e4b\u95f4\u7684\u6da8\u8dcc\u505c\u6807\u5fd7 !!!Note \u672c\u51fd\u6570\u8fd4\u56de\u7684\u5e8f\u5217\u5728\u80a1\u7968\u6709\u505c\u724c\u7684\u60c5\u51b5\u4e0b\uff0c\u5c06\u4e0d\u80fd\u4e0e[start, end]\u4e00\u4e00\u5bf9\u5e94\u3002 Args: code: \u4e2a\u80a1\u4ee3\u7801 start: \u5f00\u59cb\u65e5\u671f end: \u7ed3\u675f\u65e5\u671f Returns: \u6da8\u8dcc\u505c\u6807\u5fd7\u5217\u8868(buy, sell) \"\"\" cols = [ \"_time\" , \"close\" , \"high_limit\" , \"low_limit\" ] client = get_influx_client () measurement = cls . _measurement_name ( FrameType . DAY ) flux = ( Flux () . bucket ( client . _bucket ) . measurement ( measurement ) . range ( start , end ) . tags ({ \"code\" : code }) . fields ( cols ) . sort ( \"_time\" ) ) dtype = [ ( \"frame\" , \"O\" ), ( \"close\" , \"f4\" ), ( \"high_limit\" , \"f4\" ), ( \"low_limit\" , \"f4\" ), ] ds = NumpyDeserializer ( dtype , use_cols = [ \"_time\" , \"close\" , \"high_limit\" , \"low_limit\" ], converters = { \"_time\" : lambda x : ciso8601 . parse_datetime ( x ) . date ()}, # since we ask parse date in convertors, so we have to disable parse_date parse_date = None , ) result = await client . query ( flux , ds ) if result . size == 0 : return np . array ([], dtype = dtype ) return ( array_price_equal ( result [ \"close\" ], result [ \"high_limit\" ]), array_price_equal ( result [ \"close\" ], result [ \"low_limit\" ]), )","title":"trade_price_limit_flags()"},{"location":"api/stock/#omicron.models.stock.Stock.trade_price_limit_flags_ex","text":"\u83b7\u53d6\u80a1\u7968 code \u5728 [start, end] \u533a\u95f4\u7684\u6da8\u8dcc\u505c\u6807\u5fd7 Note \u5982\u679cend\u4e3a\u5f53\u5929\uff0c\u6ce8\u610f\u5728\u672a\u6536\u76d8\u4e4b\u524d\uff0c\u8fd9\u4e2a\u6da8\u8dcc\u505c\u6807\u5fd7\u90fd\u662f\u4e0d\u7a33\u5b9a\u7684 Parameters: Name Type Description Default code str \u80a1\u7968\u4ee3\u7801 required start date \u8d77\u59cb\u65e5\u671f required end date \u7ed3\u675f\u65e5\u671f required Returns: Type Description Dict[datetime.date, Tuple[bool, bool]] \u4ee5\u65e5\u671f\u4e3akey\uff0c\uff08\u6da8\u505c\uff0c\u8dcc\u505c\uff09\u4e3a\u503c\u7684dict Source code in omicron/models/stock.py @classmethod async def trade_price_limit_flags_ex ( cls , code : str , start : datetime . date , end : datetime . date ) -> Dict [ datetime . date , Tuple [ bool , bool ]]: \"\"\"\u83b7\u53d6\u80a1\u7968`code`\u5728`[start, end]`\u533a\u95f4\u7684\u6da8\u8dcc\u505c\u6807\u5fd7 !!!Note \u5982\u679cend\u4e3a\u5f53\u5929\uff0c\u6ce8\u610f\u5728\u672a\u6536\u76d8\u4e4b\u524d\uff0c\u8fd9\u4e2a\u6da8\u8dcc\u505c\u6807\u5fd7\u90fd\u662f\u4e0d\u7a33\u5b9a\u7684 Args: code: \u80a1\u7968\u4ee3\u7801 start: \u8d77\u59cb\u65e5\u671f end: \u7ed3\u675f\u65e5\u671f Returns: \u4ee5\u65e5\u671f\u4e3akey\uff0c\uff08\u6da8\u505c\uff0c\u8dcc\u505c\uff09\u4e3a\u503c\u7684dict \"\"\" limit_prices = await cls . get_trade_price_limits ( code , start , end ) bars = await Stock . get_bars_in_range ( code , FrameType . DAY , start = start , end = end , fq = False ) close = bars [ \"close\" ] results = {} # aligned = True for i in range ( len ( bars )): if bars [ i ][ \"frame\" ] . item () . 
date () != limit_prices [ i ][ \"frame\" ]: # aligned = False logger . warning ( \"\u6570\u636e\u540c\u6b65\u9519\u8bef\uff0c\u6da8\u8dcc\u505c\u4ef7\u683c\u4e0e\u6536\u76d8\u4ef7\u65f6\u95f4\u4e0d\u4e00\u81f4: %s , %s \" , code , bars [ i ][ \"frame\" ]) break results [ limit_prices [ i ][ \"frame\" ]] = ( price_equal ( limit_prices [ i ][ \"high_limit\" ], close [ i ]), price_equal ( limit_prices [ i ][ \"low_limit\" ], close [ i ]), ) # if not aligned: # bars = bars[i:] # limit_prices = limit_prices[i:] # for frame in bars[\"frame\"]: # frame = frame.item().date() # close = bars[bars[\"frame\"].item().date() == frame][\"close\"].item() # high = limit_prices[limit_prices[\"frame\"] == frame][\"high_limit\"].item() # low = limit_prices[limit_prices[\"frame\"] == frame][\"low_limit\"].item() # results[frame] = ( # price_equal(high, close), # price_equal(low, close) # ) return results","title":"trade_price_limit_flags_ex()"},{"location":"api/strategy/","text":"base \u00b6 BacktestState dataclass \u00b6 BacktestState(start: Union[datetime.date, datetime.datetime], end: Union[datetime.date, datetime.datetime], barss: Union[NoneType, Dict[str, numpy.ndarray[Any, numpy.dtype[dtype([('frame', ' Dict : \"\"\"\u4e70\u5165\u80a1\u7968 Args: sec: \u8bc1\u5238\u4ee3\u7801 price: \u59d4\u4e70\u4ef7\u3002\u5982\u679c\u4e3aNone\uff0c\u5219\u81ea\u52a8\u8f6c\u5e02\u4ef7\u4e70\u5165\u3002 vol: \u59d4\u4e70\u80a1\u6570\u3002\u8bf7\u81ea\u884c\u4fdd\u8bc1\u4e3a100\u7684\u6574\u6570\u3002\u5982\u679c\u4e3aNone, \u5219money\u5fc5\u987b\u4f20\u5165\u3002 money: \u59d4\u4e70\u91d1\u989d\u3002\u5982\u679c\u540c\u65f6\u4f20\u5165\u4e86vol\uff0c\u5219\u6b64\u53c2\u6570\u81ea\u52a8\u5ffd\u7565 order_time: \u4ec5\u5728\u56de\u6d4b\u6a21\u5f0f\u4e0b\u9700\u8981\u63d0\u4f9b\u3002\u5b9e\u76d8\u6a21\u5f0f\u4e0b\uff0c\u6b64\u53c2\u6570\u81ea\u52a8\u88ab\u5ffd\u7565 Returns: \u89c1traderclient\u4e2d\u7684`buy`\u65b9\u6cd5\u3002 \"\"\" logger . info ( \"buy order: %s , %s , %s , %s \" , sec , f \" { price : .2f } \" if price is not None else None , f \" { vol : .0f } \" if vol is not None else None , f \" { money : .0f } \" if money is not None else None , date = order_time , ) if vol is None : if money is None : raise ValueError ( \"parameter `mnoey` must be presented!\" ) return await self . broker . buy_by_money ( sec , money , price , order_time = order_time ) elif price is None : return self . broker . market_buy ( sec , vol , order_time = order_time ) else : return self . broker . buy ( sec , price , vol , order_time = order_time ) async def sell ( self , sec : str , price : Optional [ float ] = None , vol : Optional [ float ] = None , percent : Optional [ float ] = None , order_time : Optional [ datetime . 
datetime ] = None , ) -> Union [ List , Dict ]: \"\"\"\u5356\u51fa\u80a1\u7968 Args: sec: \u8bc1\u5238\u4ee3\u7801 price: \u59d4\u5356\u4ef7\uff0c\u5982\u679c\u672a\u63d0\u4f9b\uff0c\u5219\u8f6c\u4e3a\u5e02\u4ef7\u5355 vol: \u59d4\u5356\u80a1\u6570\u3002\u5982\u679c\u4e3aNone\uff0c\u5219percent\u5fc5\u987b\u4f20\u5165 percent: \u5356\u51fa\u4e00\u5b9a\u6bd4\u4f8b\u7684\u6301\u4ed3\uff0c\u53d6\u503c\u4ecb\u4e8e0\u4e0e1\u4e4b\u95f4\u3002\u5982\u679c\u4e0evol\u540c\u65f6\u63d0\u4f9b\uff0c\u6b64\u53c2\u6570\u5c06\u88ab\u5ffd\u7565\u3002\u8bf7\u81ea\u884c\u4fdd\u8bc1\u6309\u6bd4\u4f8b\u6362\u7b97\u540e\u7684\u5356\u51fa\u6570\u636e\u662f\u7b26\u5408\u8981\u6c42\u7684\uff08\u6bd4\u5982\u4e0d\u4e3a100\u7684\u500d\u6570\uff0c\u4f46\u6709\u4e9b\u60c5\u51b5\u4e0b\u8fd9\u662f\u5141\u8bb8\u7684\uff0c\u6240\u4ee5\u7a0b\u5e8f\u8fd9\u91cc\u65e0\u6cd5\u5e2e\u4f60\u5224\u65ad\uff09 order_time: \u4ec5\u5728\u56de\u6d4b\u6a21\u5f0f\u4e0b\u9700\u8981\u63d0\u4f9b\u3002\u5b9e\u76d8\u6a21\u5f0f\u4e0b\uff0c\u6b64\u53c2\u6570\u81ea\u52a8\u88ab\u5ffd\u7565 Returns: Union[List, Dict]: \u6210\u4ea4\u8fd4\u56de\uff0c\u8be6\u89c1traderclient\u4e2d\u7684`buy`\u65b9\u6cd5\uff0ctrade server\u53ea\u8fd4\u56de\u4e00\u4e2a\u59d4\u6258\u5355\u4fe1\u606f \"\"\" logger . info ( \"sell order: %s , %s , %s , %s \" , sec , f \" { price : .2f } \" if price is not None else None , f \" { vol : .0f } \" if vol is not None else None , f \" { percent : .2% } \" if percent is not None else None , date = order_time , ) if vol is None and percent is None : raise ValueError ( \"either vol or percent must be presented\" ) if vol is None : if price is None : price = await self . broker . _get_market_sell_price ( sec , order_time = order_time ) # there's no market_sell_percent API in traderclient return self . broker . sell_percent ( sec , price , percent , order_time = order_time ) # type: ignore else : if price is None : return self . broker . market_sell ( sec , vol , order_time = order_time ) else : return self . broker . sell ( sec , price , vol , order_time = order_time ) async def filter_paused_stock ( self , buylist : List [ str ], dt : datetime . date ): secs = await Security . select ( dt ) . eval () in_trading = jq . get_price ( secs , fields = [ \"paused\" ], start_date = dt , end_date = dt , skip_paused = True )[ \"code\" ] . to_numpy () return np . 
intersect1d ( buylist , in_trading ) async def predict ( self , frame : Frame , frame_type : FrameType , i : int , barss : Dict [ str , BarsArray ], ** kwargs , ): \"\"\"\u7b56\u7565\u8bc4\u4f30\u51fd\u6570\u3002\u5728\u6b64\u51fd\u6570\u4e2d\u5b9e\u73b0\u4ea4\u6613\u4fe1\u53f7\u68c0\u6d4b\u548c\u5904\u7406\u3002 Args: frame: \u5f53\u524d\u65f6\u95f4\u5e27 frame_type: \u5904\u7406\u7684\u6570\u636e\u4e3b\u5468\u671f i: \u5f53\u524d\u65f6\u95f4\u79bb\u56de\u6d4b\u8d77\u59cb\u7684\u5355\u4f4d\u6570 barss: \u5982\u679c\u8c03\u7528`backtest`\u65f6\u4f20\u5165\u4e86`portfolio`\u53ca`min_bars`\u53c2\u6570\uff0c\u5219`backtest`\u5c06\u4f1a\u5728\u56de\u6d4b\u4e4b\u524d\uff0c\u9884\u53d6\u4ece[start - min_bars * frame_type, end]\u95f4\u7684portfolio\u884c\u60c5\u6570\u636e\uff0c\u5e76\u5728\u6bcf\u6b21\u8c03\u7528`predict`\u65b9\u6cd5\u65f6\uff0c\u901a\u8fc7`barss`\u53c2\u6570\uff0c\u5c06[start - min_bars * frame_type, start + i * frame_type]\u95f4\u7684\u6570\u636e\u4f20\u7ed9`predict`\u65b9\u6cd5\u3002\u4f20\u5165\u7684\u6570\u636e\u5df2\u8fdb\u884c\u524d\u590d\u6743\u3002 Keyword Args: \u5728`backtest`\u65b9\u6cd5\u4e2d\u7684\u4f20\u5165\u7684kwargs\u53c2\u6570\u5c06\u88ab\u900f\u4f20\u5230\u6b64\u65b9\u6cd5\u4e2d\u3002 \"\"\" raise NotImplementedError async def plot_metrics ( self , indicator : Union [ pd . DataFrame , List [ Tuple ], None ] = None ): \"\"\"\u7b56\u7565\u56de\u6d4b\u62a5\u544a Args: indicator: \u56de\u6d4b\u65f6\u4f7f\u7528\u7684\u6307\u6807\u3002\u5982\u679c\u5b58\u5728\uff0c\u5c06\u53e0\u52a0\u5230\u7b56\u7565\u56de\u6d4b\u56fe\u4e0a\u3002\u5b83\u5e94\u8be5\u662f\u4e00\u4e2a\u4ee5\u65e5\u671f\u4e3a\u7d22\u5f15\uff0c\u6307\u6807\u5217\u540d\u4e3a\"value\"\u7684DataFrame \"\"\" if self . bills is None or self . metrics is None : raise ValueError ( \"Please run `start_backtest` first.\" ) if isinstance ( indicator , list ): assert len ( indicator [ 0 ]) == 2 indicator = pd . DataFrame ( indicator , columns = [ \"date\" , \"value\" ]) indicator . set_index ( \"date\" , inplace = True ) if self . _baseline is not None : mg = MetricsGraph ( self . bills , self . metrics , baseline_code = self . _baseline , indicator = indicator , ) else : mg = MetricsGraph ( self . bills , self . metrics , indicator = indicator ) await mg . 
plot () cash property readonly \u00b6 \u8fd4\u56de\u5f53\u524d\u53ef\u7528\u73b0\u91d1 __init__ ( self , url , account = None , token = None , name = None , ver = None , is_backtest = True , start = None , end = None , frame_type = None , baseline = '399300.XSHE' ) special \u00b6 \u6784\u9020\u51fd\u6570 Parameters: Name Type Description Default url str \u5b9e\u76d8/\u56de\u6d4b\u670d\u52a1\u5668\u7684\u5730\u5740\u3002 required start Union[datetime.date, datetime.datetime] \u56de\u6d4b\u8d77\u59cb\u65e5\u671f\u3002\u56de\u6d4b\u6a21\u5f0f\u4e0b\u5fc5\u987b\u4f20\u5165\u3002 None end Union[datetime.date, datetime.datetime] \u56de\u6d4b\u7ed3\u675f\u65e5\u671f\u3002\u56de\u6d4b\u6a21\u5f0f\u4e0b\u5fc5\u987b\u4f20\u5165\u3002 None account Optional[str] \u5b9e\u76d8/\u56de\u6d4b\u8d26\u53f7\u3002\u5b9e\u76d8\u6a21\u5f0f\u4e0b\u5fc5\u987b\u4f20\u5165\u3002\u5728\u56de\u6d4b\u6a21\u5f0f\u4e0b\uff0c\u5982\u679c\u672a\u4f20\u5165\uff0c\u5c06\u4ee5\u7b56\u7565\u540d+\u968f\u673a\u5b57\u7b26\u6784\u5efa\u8d26\u53f7\u3002 None token Optional[str] \u5b9e\u76d8/\u56de\u6d4b\u65f6\u7528\u7684token\u3002\u5b9e\u76d8\u6a21\u5f0f\u4e0b\u5fc5\u987b\u4f20\u5165\u3002\u5728\u56de\u6d4b\u6a21\u5f0f\u4e0b\uff0c\u5982\u679c\u672a\u4f20\u5165\uff0c\u5c06\u81ea\u52a8\u751f\u6210\u3002 None is_backtest bool \u662f\u5426\u4e3a\u56de\u6d4b\u6a21\u5f0f\u3002 True name Optional[str] \u7b56\u7565\u540d\u3002\u5982\u679c\u4e0d\u4f20\u5165\uff0c\u5219\u4f7f\u7528\u7c7b\u540d\u5b57\u5c0f\u5199 None ver Optional[str] \u7b56\u7565\u7248\u672c\u53f7\u3002\u5982\u679c\u4e0d\u4f20\u5165\uff0c\u5219\u9ed8\u8ba4\u4e3a0.1. None start Union[datetime.date, datetime.datetime] \u5982\u679c\u662f\u56de\u6d4b\u6a21\u5f0f\uff0c\u5219\u9700\u8981\u63d0\u4f9b\u56de\u6d4b\u8d77\u59cb\u65f6\u95f4 None end Union[datetime.date, datetime.datetime] \u5982\u679c\u662f\u56de\u6d4b\u6a21\u5f0f\uff0c\u5219\u9700\u8981\u63d0\u4f9b\u56de\u6d4b\u7ed3\u675f\u65f6\u95f4 None frame_type Optional[coretypes.types.FrameType] \u5982\u679c\u662f\u56de\u6d4b\u6a21\u5f0f\uff0c\u5219\u9700\u8981\u63d0\u4f9b\u56de\u6d4b\u65f6\u4f7f\u7528\u7684\u4e3b\u5468\u671f None baseline Optional[str] \u5982\u679c\u662f\u56de\u6d4b\u6a21\u5f0f\uff0c\u5219\u53ef\u4ee5\u63d0\u4f9b\u6b64\u53c2\u6570\u4f5c\u4e3a\u56de\u6d4b\u57fa\u51c6 '399300.XSHE' Source code in omicron/strategy/base.py def __init__ ( self , url : str , account : Optional [ str ] = None , token : Optional [ str ] = None , name : Optional [ str ] = None , ver : Optional [ str ] = None , is_backtest : bool = True , start : Optional [ Frame ] = None , end : Optional [ Frame ] = None , frame_type : Optional [ FrameType ] = None , baseline : Optional [ str ] = \"399300.XSHE\" , ): \"\"\"\u6784\u9020\u51fd\u6570 Args: url: \u5b9e\u76d8/\u56de\u6d4b\u670d\u52a1\u5668\u7684\u5730\u5740\u3002 start: \u56de\u6d4b\u8d77\u59cb\u65e5\u671f\u3002\u56de\u6d4b\u6a21\u5f0f\u4e0b\u5fc5\u987b\u4f20\u5165\u3002 end: \u56de\u6d4b\u7ed3\u675f\u65e5\u671f\u3002\u56de\u6d4b\u6a21\u5f0f\u4e0b\u5fc5\u987b\u4f20\u5165\u3002 account: \u5b9e\u76d8/\u56de\u6d4b\u8d26\u53f7\u3002\u5b9e\u76d8\u6a21\u5f0f\u4e0b\u5fc5\u987b\u4f20\u5165\u3002\u5728\u56de\u6d4b\u6a21\u5f0f\u4e0b\uff0c\u5982\u679c\u672a\u4f20\u5165\uff0c\u5c06\u4ee5\u7b56\u7565\u540d+\u968f\u673a\u5b57\u7b26\u6784\u5efa\u8d26\u53f7\u3002 token: \u5b9e\u76d8/\u56de\u6d4b\u65f6\u7528\u7684token\u3002\u5b9e\u76d8\u6a21\u5f0f\u4e0b\u5fc5\u987b\u4f20\u5165\u3002\u5728\u56de\u6d4b\u6a21\u5f0f\u4e0b\uff0c\u5982\u679c\u672a\u4f20\u5165\uff0c\u5c06\u81ea\u52a8\u751f\u6210\u3002 
is_backtest: \u662f\u5426\u4e3a\u56de\u6d4b\u6a21\u5f0f\u3002 name: \u7b56\u7565\u540d\u3002\u5982\u679c\u4e0d\u4f20\u5165\uff0c\u5219\u4f7f\u7528\u7c7b\u540d\u5b57\u5c0f\u5199 ver: \u7b56\u7565\u7248\u672c\u53f7\u3002\u5982\u679c\u4e0d\u4f20\u5165\uff0c\u5219\u9ed8\u8ba4\u4e3a0.1. start: \u5982\u679c\u662f\u56de\u6d4b\u6a21\u5f0f\uff0c\u5219\u9700\u8981\u63d0\u4f9b\u56de\u6d4b\u8d77\u59cb\u65f6\u95f4 end: \u5982\u679c\u662f\u56de\u6d4b\u6a21\u5f0f\uff0c\u5219\u9700\u8981\u63d0\u4f9b\u56de\u6d4b\u7ed3\u675f\u65f6\u95f4 frame_type: \u5982\u679c\u662f\u56de\u6d4b\u6a21\u5f0f\uff0c\u5219\u9700\u8981\u63d0\u4f9b\u56de\u6d4b\u65f6\u4f7f\u7528\u7684\u4e3b\u5468\u671f baseline: \u5982\u679c\u662f\u56de\u6d4b\u6a21\u5f0f\uff0c\u5219\u53ef\u4ee5\u63d0\u4f9b\u6b64\u53c2\u6570\u4f5c\u4e3a\u56de\u6d4b\u57fa\u51c6 \"\"\" self . ver = ver or \"0.1\" self . name = name or self . __class__ . __name__ . lower () + f \"_v { self . ver } \" self . token = token or uuid . uuid4 () . hex self . account = account or f \"smallcap- { self . token [ - 4 :] } \" self . url = url if is_backtest : if start is None or end is None or frame_type is None : raise ValueError ( \"start, end and frame_type must be presented.\" ) self . bs = BacktestState ( start , end , None , 0 ) self . bills = None self . metrics = None self . _frame_type = frame_type self . broker = TraderClient ( url , self . account , self . token , is_backtest = True , start = self . bs . start , end = self . bs . end , ) self . _baseline = baseline else : if account is None or token is None : raise ValueError ( \"account and token must be presented.\" ) self . broker = TraderClient ( url , self . account , self . token , is_backtest = False ) available_shares ( self , sec , dt = None ) \u00b6 \u8fd4\u56de\u7ed9\u5b9a\u80a1\u7968\u5728 dt \u65e5\u7684\u53ef\u552e\u80a1\u6570 Parameters: Name Type Description Default sec str \u8bc1\u5238\u4ee3\u7801 required dt Union[datetime.date, datetime.datetime] \u65e5\u671f\uff0c\u5728\u5b9e\u76d8\u4e2d\u65e0\u610f\u4e49\uff0c\u53ea\u80fd\u8fd4\u56de\u6700\u65b0\u6570\u636e\uff1b\u5728\u56de\u6d4b\u65f6\uff0c\u5fc5\u987b\u6307\u5b9a\u65e5\u671f\uff0c\u4e14\u8fd4\u56de\u6307\u5b9a\u65e5\u671f\u4e0b\u7684\u6301\u4ed3\u3002 None Source code in omicron/strategy/base.py def available_shares ( self , sec : str , dt : Optional [ Frame ] = None ): \"\"\"\u8fd4\u56de\u7ed9\u5b9a\u80a1\u7968\u5728`dt`\u65e5\u7684\u53ef\u552e\u80a1\u6570 Args: sec: \u8bc1\u5238\u4ee3\u7801 dt: \u65e5\u671f\uff0c\u5728\u5b9e\u76d8\u4e2d\u65e0\u610f\u4e49\uff0c\u53ea\u80fd\u8fd4\u56de\u6700\u65b0\u6570\u636e\uff1b\u5728\u56de\u6d4b\u65f6\uff0c\u5fc5\u987b\u6307\u5b9a\u65e5\u671f\uff0c\u4e14\u8fd4\u56de\u6307\u5b9a\u65e5\u671f\u4e0b\u7684\u6301\u4ed3\u3002 \"\"\" return self . broker . 
available_shares ( sec , dt ) backtest ( self , stop_on_error = True , ** kwargs ) async \u00b6 \u6267\u884c\u56de\u6d4b Parameters: Name Type Description Default stop_on_error bool \u5982\u679c\u4e3aTrue\uff0c\u5219\u53d1\u751f\u5f02\u5e38\u65f6\uff0c\u5c06\u505c\u6b62\u56de\u6d4b\u3002\u5426\u5219\u5ffd\u7565\u9519\u8bef\uff0c\u7ee7\u7eed\u6267\u884c\u3002 True Keyword arguments: Name Type Description portfolio Dict[str, BarsArray] \u4ee3\u7801\u5217\u8868\u3002\u5728\u8be5\u5217\u8868\u4e2d\u7684\u54c1\u79cd\uff0c\u5c06\u5728\u56de\u6d4b\u4e4b\u524d\u81ea\u52a8\u9884\u53d6\u884c\u60c5\u6570\u636e\uff0c\u5e76\u5728\u8c03\u7528predict\u65f6\uff0c\u4f20\u5165\u622a\u6b62\u5230\u5f53\u524dframe\u7684\uff0c\u957f\u5ea6\u4e3an\u7684\u884c\u60c5\u6570\u636e\u3002\u884c\u60c5\u5468\u671f\u7531\u6784\u9020\u65f6\u7684frame_type\u6307\u5b9a min_bars int \u56de\u6d4b\u65f6\u5fc5\u8981\u7684bars\u7684\u6700\u5c0f\u503c Source code in omicron/strategy/base.py async def backtest ( self , stop_on_error : bool = True , ** kwargs ): \"\"\"\u6267\u884c\u56de\u6d4b Args: stop_on_error: \u5982\u679c\u4e3aTrue\uff0c\u5219\u53d1\u751f\u5f02\u5e38\u65f6\uff0c\u5c06\u505c\u6b62\u56de\u6d4b\u3002\u5426\u5219\u5ffd\u7565\u9519\u8bef\uff0c\u7ee7\u7eed\u6267\u884c\u3002 Keyword Args: portfolio Dict[str, BarsArray]: \u4ee3\u7801\u5217\u8868\u3002\u5728\u8be5\u5217\u8868\u4e2d\u7684\u54c1\u79cd\uff0c\u5c06\u5728\u56de\u6d4b\u4e4b\u524d\u81ea\u52a8\u9884\u53d6\u884c\u60c5\u6570\u636e\uff0c\u5e76\u5728\u8c03\u7528predict\u65f6\uff0c\u4f20\u5165\u622a\u6b62\u5230\u5f53\u524dframe\u7684\uff0c\u957f\u5ea6\u4e3an\u7684\u884c\u60c5\u6570\u636e\u3002\u884c\u60c5\u5468\u671f\u7531\u6784\u9020\u65f6\u7684frame_type\u6307\u5b9a min_bars int: \u56de\u6d4b\u65f6\u5fc5\u8981\u7684bars\u7684\u6700\u5c0f\u503c \"\"\" portfolio : List [ str ] = kwargs . get ( \"portfolio\" ) # type: ignore n = kwargs . get ( \"min_bars\" , 0 ) await self . _cache_bars_for_backtest ( portfolio , n ) self . bs . cursor = n converter = ( tf . int2date if self . _frame_type in tf . day_level_frames else tf . int2time ) for i , frame in enumerate ( tf . get_frames ( self . bs . start , self . bs . end , self . _frame_type ) # type: ignore ): barss = self . _next () logger . debug ( \" %s th iteration\" , i , date = converter ( frame )) try : await self . predict ( converter ( frame ), self . _frame_type , i , barss = barss , ** kwargs # type: ignore ) except Exception as e : logger . exception ( e ) if stop_on_error : raise e self . broker . stop_backtest () self . bills = self . broker . bills () self . metrics = self . broker . metrics ( baseline = self . 
_baseline ) buy ( self , sec , price = None , vol = None , money = None , order_time = None ) async \u00b6 \u4e70\u5165\u80a1\u7968 Parameters: Name Type Description Default sec str \u8bc1\u5238\u4ee3\u7801 required price Optional[float] \u59d4\u4e70\u4ef7\u3002\u5982\u679c\u4e3aNone\uff0c\u5219\u81ea\u52a8\u8f6c\u5e02\u4ef7\u4e70\u5165\u3002 None vol Optional[int] \u59d4\u4e70\u80a1\u6570\u3002\u8bf7\u81ea\u884c\u4fdd\u8bc1\u4e3a100\u7684\u6574\u6570\u3002\u5982\u679c\u4e3aNone, \u5219money\u5fc5\u987b\u4f20\u5165\u3002 None money Optional[float] \u59d4\u4e70\u91d1\u989d\u3002\u5982\u679c\u540c\u65f6\u4f20\u5165\u4e86vol\uff0c\u5219\u6b64\u53c2\u6570\u81ea\u52a8\u5ffd\u7565 None order_time Optional[datetime.datetime] \u4ec5\u5728\u56de\u6d4b\u6a21\u5f0f\u4e0b\u9700\u8981\u63d0\u4f9b\u3002\u5b9e\u76d8\u6a21\u5f0f\u4e0b\uff0c\u6b64\u53c2\u6570\u81ea\u52a8\u88ab\u5ffd\u7565 None Returns: Type Description Dict \u89c1traderclient\u4e2d\u7684 buy \u65b9\u6cd5\u3002 Source code in omicron/strategy/base.py async def buy ( self , sec : str , price : Optional [ float ] = None , vol : Optional [ int ] = None , money : Optional [ float ] = None , order_time : Optional [ datetime . datetime ] = None , ) -> Dict : \"\"\"\u4e70\u5165\u80a1\u7968 Args: sec: \u8bc1\u5238\u4ee3\u7801 price: \u59d4\u4e70\u4ef7\u3002\u5982\u679c\u4e3aNone\uff0c\u5219\u81ea\u52a8\u8f6c\u5e02\u4ef7\u4e70\u5165\u3002 vol: \u59d4\u4e70\u80a1\u6570\u3002\u8bf7\u81ea\u884c\u4fdd\u8bc1\u4e3a100\u7684\u6574\u6570\u3002\u5982\u679c\u4e3aNone, \u5219money\u5fc5\u987b\u4f20\u5165\u3002 money: \u59d4\u4e70\u91d1\u989d\u3002\u5982\u679c\u540c\u65f6\u4f20\u5165\u4e86vol\uff0c\u5219\u6b64\u53c2\u6570\u81ea\u52a8\u5ffd\u7565 order_time: \u4ec5\u5728\u56de\u6d4b\u6a21\u5f0f\u4e0b\u9700\u8981\u63d0\u4f9b\u3002\u5b9e\u76d8\u6a21\u5f0f\u4e0b\uff0c\u6b64\u53c2\u6570\u81ea\u52a8\u88ab\u5ffd\u7565 Returns: \u89c1traderclient\u4e2d\u7684`buy`\u65b9\u6cd5\u3002 \"\"\" logger . info ( \"buy order: %s , %s , %s , %s \" , sec , f \" { price : .2f } \" if price is not None else None , f \" { vol : .0f } \" if vol is not None else None , f \" { money : .0f } \" if money is not None else None , date = order_time , ) if vol is None : if money is None : raise ValueError ( \"parameter `mnoey` must be presented!\" ) return await self . broker . buy_by_money ( sec , money , price , order_time = order_time ) elif price is None : return self . broker . market_buy ( sec , vol , order_time = order_time ) else : return self . broker . buy ( sec , price , vol , order_time = order_time ) peek ( self , code , n ) async \u00b6 \u5141\u8bb8\u7b56\u7565\u5077\u770b\u672a\u6765\u6570\u636e \u53ef\u7528\u4ee5\u56e0\u5b50\u68c0\u9a8c\u573a\u666f\u3002\u8981\u6c42\u6570\u636e\u672c\u8eab\u5df2\u7f13\u5b58\u3002\u5426\u5219\u8bf7\u7528Stock.get_bars\u7b49\u65b9\u6cd5\u83b7\u53d6\u3002 Source code in omicron/strategy/base.py async def peek ( self , code : str , n : int ): \"\"\"\u5141\u8bb8\u7b56\u7565\u5077\u770b\u672a\u6765\u6570\u636e \u53ef\u7528\u4ee5\u56e0\u5b50\u68c0\u9a8c\u573a\u666f\u3002\u8981\u6c42\u6570\u636e\u672c\u8eab\u5df2\u7f13\u5b58\u3002\u5426\u5219\u8bf7\u7528Stock.get_bars\u7b49\u65b9\u6cd5\u83b7\u53d6\u3002 \"\"\" if self . bs is None or self . bs . barss is None : raise ValueError ( \"data is not cached\" ) if code in self . bs . barss : if self . bs . cursor + n + 1 < len ( self . bs . barss [ code ]): return Stock . qfq ( self . bs . barss [ code ][ self . bs . cursor : self . bs . 
cursor + n ] ) else : raise ValueError ( \"data is not cached\" ) plot_metrics ( self , indicator = None ) async \u00b6 \u7b56\u7565\u56de\u6d4b\u62a5\u544a Parameters: Name Type Description Default indicator Union[pandas.core.frame.DataFrame, List[Tuple]] \u56de\u6d4b\u65f6\u4f7f\u7528\u7684\u6307\u6807\u3002\u5982\u679c\u5b58\u5728\uff0c\u5c06\u53e0\u52a0\u5230\u7b56\u7565\u56de\u6d4b\u56fe\u4e0a\u3002\u5b83\u5e94\u8be5\u662f\u4e00\u4e2a\u4ee5\u65e5\u671f\u4e3a\u7d22\u5f15\uff0c\u6307\u6807\u5217\u540d\u4e3a\"value\"\u7684DataFrame None Source code in omicron/strategy/base.py async def plot_metrics ( self , indicator : Union [ pd . DataFrame , List [ Tuple ], None ] = None ): \"\"\"\u7b56\u7565\u56de\u6d4b\u62a5\u544a Args: indicator: \u56de\u6d4b\u65f6\u4f7f\u7528\u7684\u6307\u6807\u3002\u5982\u679c\u5b58\u5728\uff0c\u5c06\u53e0\u52a0\u5230\u7b56\u7565\u56de\u6d4b\u56fe\u4e0a\u3002\u5b83\u5e94\u8be5\u662f\u4e00\u4e2a\u4ee5\u65e5\u671f\u4e3a\u7d22\u5f15\uff0c\u6307\u6807\u5217\u540d\u4e3a\"value\"\u7684DataFrame \"\"\" if self . bills is None or self . metrics is None : raise ValueError ( \"Please run `start_backtest` first.\" ) if isinstance ( indicator , list ): assert len ( indicator [ 0 ]) == 2 indicator = pd . DataFrame ( indicator , columns = [ \"date\" , \"value\" ]) indicator . set_index ( \"date\" , inplace = True ) if self . _baseline is not None : mg = MetricsGraph ( self . bills , self . metrics , baseline_code = self . _baseline , indicator = indicator , ) else : mg = MetricsGraph ( self . bills , self . metrics , indicator = indicator ) await mg . plot () positions ( self , dt = None ) \u00b6 \u8fd4\u56de\u5f53\u524d\u6301\u4ed3 Source code in omicron/strategy/base.py def positions ( self , dt : Optional [ datetime . date ] = None ): \"\"\"\u8fd4\u56de\u5f53\u524d\u6301\u4ed3\"\"\" return self . broker . 
positions ( dt ) predict ( self , frame , frame_type , i , barss , ** kwargs ) async \u00b6 \u7b56\u7565\u8bc4\u4f30\u51fd\u6570\u3002\u5728\u6b64\u51fd\u6570\u4e2d\u5b9e\u73b0\u4ea4\u6613\u4fe1\u53f7\u68c0\u6d4b\u548c\u5904\u7406\u3002 Parameters: Name Type Description Default frame Union[datetime.date, datetime.datetime] \u5f53\u524d\u65f6\u95f4\u5e27 required frame_type FrameType \u5904\u7406\u7684\u6570\u636e\u4e3b\u5468\u671f required i int \u5f53\u524d\u65f6\u95f4\u79bb\u56de\u6d4b\u8d77\u59cb\u7684\u5355\u4f4d\u6570 required barss Dict[str, numpy.ndarray[Any, numpy.dtype[dtype([('frame', ' Union [ List , Dict ]: \"\"\"\u5356\u51fa\u80a1\u7968 Args: sec: \u8bc1\u5238\u4ee3\u7801 price: \u59d4\u5356\u4ef7\uff0c\u5982\u679c\u672a\u63d0\u4f9b\uff0c\u5219\u8f6c\u4e3a\u5e02\u4ef7\u5355 vol: \u59d4\u5356\u80a1\u6570\u3002\u5982\u679c\u4e3aNone\uff0c\u5219percent\u5fc5\u987b\u4f20\u5165 percent: \u5356\u51fa\u4e00\u5b9a\u6bd4\u4f8b\u7684\u6301\u4ed3\uff0c\u53d6\u503c\u4ecb\u4e8e0\u4e0e1\u4e4b\u95f4\u3002\u5982\u679c\u4e0evol\u540c\u65f6\u63d0\u4f9b\uff0c\u6b64\u53c2\u6570\u5c06\u88ab\u5ffd\u7565\u3002\u8bf7\u81ea\u884c\u4fdd\u8bc1\u6309\u6bd4\u4f8b\u6362\u7b97\u540e\u7684\u5356\u51fa\u6570\u636e\u662f\u7b26\u5408\u8981\u6c42\u7684\uff08\u6bd4\u5982\u4e0d\u4e3a100\u7684\u500d\u6570\uff0c\u4f46\u6709\u4e9b\u60c5\u51b5\u4e0b\u8fd9\u662f\u5141\u8bb8\u7684\uff0c\u6240\u4ee5\u7a0b\u5e8f\u8fd9\u91cc\u65e0\u6cd5\u5e2e\u4f60\u5224\u65ad\uff09 order_time: \u4ec5\u5728\u56de\u6d4b\u6a21\u5f0f\u4e0b\u9700\u8981\u63d0\u4f9b\u3002\u5b9e\u76d8\u6a21\u5f0f\u4e0b\uff0c\u6b64\u53c2\u6570\u81ea\u52a8\u88ab\u5ffd\u7565 Returns: Union[List, Dict]: \u6210\u4ea4\u8fd4\u56de\uff0c\u8be6\u89c1traderclient\u4e2d\u7684`buy`\u65b9\u6cd5\uff0ctrade server\u53ea\u8fd4\u56de\u4e00\u4e2a\u59d4\u6258\u5355\u4fe1\u606f \"\"\" logger . info ( \"sell order: %s , %s , %s , %s \" , sec , f \" { price : .2f } \" if price is not None else None , f \" { vol : .0f } \" if vol is not None else None , f \" { percent : .2% } \" if percent is not None else None , date = order_time , ) if vol is None and percent is None : raise ValueError ( \"either vol or percent must be presented\" ) if vol is None : if price is None : price = await self . broker . _get_market_sell_price ( sec , order_time = order_time ) # there's no market_sell_percent API in traderclient return self . broker . sell_percent ( sec , price , percent , order_time = order_time ) # type: ignore else : if price is None : return self . broker . market_sell ( sec , vol , order_time = order_time ) else : return self . broker . sell ( sec , price , vol , order_time = order_time ) sma \u00b6 SMAStrategy ( BaseStrategy ) \u00b6 Source code in omicron/strategy/sma.py class SMAStrategy ( BaseStrategy ): def __init__ ( self , sec : str , n_short : int = 5 , n_long : int = 10 , * args , ** kwargs ): self . _sec = sec self . _n_short = n_short self . _n_long = n_long self . indicators = [] super () . __init__ ( * args , ** kwargs ) async def predict ( self , frame : Frame , frame_type : FrameType , i : int , barss , ** kwargs ): if barss is None : raise ValueError ( \"please specify `portfolio` and `min_bars`\" ) bars : Union [ BarsArray , None ] = barss . get ( self . _sec ) if bars is None : raise ValueError ( f \" { self . _sec } not found in `portfolio`\" ) ma_short = np . mean ( bars [ \"close\" ][ - self . _n_short :]) ma_long = np . mean ( bars [ \"close\" ][ - self . _n_long :]) if ma_short > ma_long : self . indicators . append (( frame , 1 )) if self . 
cash >= 100 * bars [ \"close\" ][ - 1 ]: await self . buy ( self . _sec , money = self . cash , order_time = tf . combine_time ( frame , 14 , 55 ), ) elif ma_short < ma_long : self . indicators . append (( frame , - 1 )) if self . available_shares ( self . _sec , frame ) > 0 : await self . sell ( self . _sec , percent = 1.0 , order_time = tf . combine_time ( frame , 14 , 55 ) ) predict ( self , frame , frame_type , i , barss , ** kwargs ) async \u00b6 \u7b56\u7565\u8bc4\u4f30\u51fd\u6570\u3002\u5728\u6b64\u51fd\u6570\u4e2d\u5b9e\u73b0\u4ea4\u6613\u4fe1\u53f7\u68c0\u6d4b\u548c\u5904\u7406\u3002 Parameters: Name Type Description Default frame Union[datetime.date, datetime.datetime] \u5f53\u524d\u65f6\u95f4\u5e27 required frame_type FrameType \u5904\u7406\u7684\u6570\u636e\u4e3b\u5468\u671f required i int \u5f53\u524d\u65f6\u95f4\u79bb\u56de\u6d4b\u8d77\u59cb\u7684\u5355\u4f4d\u6570 required barss \u5982\u679c\u8c03\u7528 backtest \u65f6\u4f20\u5165\u4e86 portfolio \u53ca min_bars \u53c2\u6570\uff0c\u5219 backtest \u5c06\u4f1a\u5728\u56de\u6d4b\u4e4b\u524d\uff0c\u9884\u53d6\u4ece[start - min_bars * frame_type, end]\u95f4\u7684portfolio\u884c\u60c5\u6570\u636e\uff0c\u5e76\u5728\u6bcf\u6b21\u8c03\u7528 predict \u65b9\u6cd5\u65f6\uff0c\u901a\u8fc7 barss \u53c2\u6570\uff0c\u5c06[start - min_bars * frame_type, start + i * frame_type]\u95f4\u7684\u6570\u636e\u4f20\u7ed9 predict \u65b9\u6cd5\u3002\u4f20\u5165\u7684\u6570\u636e\u5df2\u8fdb\u884c\u524d\u590d\u6743\u3002 required Keyword Args: \u5728 backtest \u65b9\u6cd5\u4e2d\u7684\u4f20\u5165\u7684kwargs\u53c2\u6570\u5c06\u88ab\u900f\u4f20\u5230\u6b64\u65b9\u6cd5\u4e2d\u3002 Source code in omicron/strategy/sma.py async def predict ( self , frame : Frame , frame_type : FrameType , i : int , barss , ** kwargs ): if barss is None : raise ValueError ( \"please specify `portfolio` and `min_bars`\" ) bars : Union [ BarsArray , None ] = barss . get ( self . _sec ) if bars is None : raise ValueError ( f \" { self . _sec } not found in `portfolio`\" ) ma_short = np . mean ( bars [ \"close\" ][ - self . _n_short :]) ma_long = np . mean ( bars [ \"close\" ][ - self . _n_long :]) if ma_short > ma_long : self . indicators . append (( frame , 1 )) if self . cash >= 100 * bars [ \"close\" ][ - 1 ]: await self . buy ( self . _sec , money = self . cash , order_time = tf . combine_time ( frame , 14 , 55 ), ) elif ma_short < ma_long : self . indicators . append (( frame , - 1 )) if self . available_shares ( self . _sec , frame ) > 0 : await self . sell ( self . _sec , percent = 1.0 , order_time = tf . 
combine_time ( frame , 14 , 55 ) )","title":"\u7b56\u7565\u6846\u67b6"},{"location":"api/strategy/#omicron.strategy.base","text":"","title":"base"},{"location":"api/strategy/#omicron.strategy.base.BacktestState","text":"BacktestState(start: Union[datetime.date, datetime.datetime], end: Union[datetime.date, datetime.datetime], barss: Union[NoneType, Dict[str, numpy.ndarray[Any, numpy.dtype[dtype([('frame', ' Dict : \"\"\"\u4e70\u5165\u80a1\u7968 Args: sec: \u8bc1\u5238\u4ee3\u7801 price: \u59d4\u4e70\u4ef7\u3002\u5982\u679c\u4e3aNone\uff0c\u5219\u81ea\u52a8\u8f6c\u5e02\u4ef7\u4e70\u5165\u3002 vol: \u59d4\u4e70\u80a1\u6570\u3002\u8bf7\u81ea\u884c\u4fdd\u8bc1\u4e3a100\u7684\u6574\u6570\u3002\u5982\u679c\u4e3aNone, \u5219money\u5fc5\u987b\u4f20\u5165\u3002 money: \u59d4\u4e70\u91d1\u989d\u3002\u5982\u679c\u540c\u65f6\u4f20\u5165\u4e86vol\uff0c\u5219\u6b64\u53c2\u6570\u81ea\u52a8\u5ffd\u7565 order_time: \u4ec5\u5728\u56de\u6d4b\u6a21\u5f0f\u4e0b\u9700\u8981\u63d0\u4f9b\u3002\u5b9e\u76d8\u6a21\u5f0f\u4e0b\uff0c\u6b64\u53c2\u6570\u81ea\u52a8\u88ab\u5ffd\u7565 Returns: \u89c1traderclient\u4e2d\u7684`buy`\u65b9\u6cd5\u3002 \"\"\" logger . info ( \"buy order: %s , %s , %s , %s \" , sec , f \" { price : .2f } \" if price is not None else None , f \" { vol : .0f } \" if vol is not None else None , f \" { money : .0f } \" if money is not None else None , date = order_time , ) if vol is None : if money is None : raise ValueError ( \"parameter `mnoey` must be presented!\" ) return await self . broker . buy_by_money ( sec , money , price , order_time = order_time ) elif price is None : return self . broker . market_buy ( sec , vol , order_time = order_time ) else : return self . broker . buy ( sec , price , vol , order_time = order_time ) async def sell ( self , sec : str , price : Optional [ float ] = None , vol : Optional [ float ] = None , percent : Optional [ float ] = None , order_time : Optional [ datetime . datetime ] = None , ) -> Union [ List , Dict ]: \"\"\"\u5356\u51fa\u80a1\u7968 Args: sec: \u8bc1\u5238\u4ee3\u7801 price: \u59d4\u5356\u4ef7\uff0c\u5982\u679c\u672a\u63d0\u4f9b\uff0c\u5219\u8f6c\u4e3a\u5e02\u4ef7\u5355 vol: \u59d4\u5356\u80a1\u6570\u3002\u5982\u679c\u4e3aNone\uff0c\u5219percent\u5fc5\u987b\u4f20\u5165 percent: \u5356\u51fa\u4e00\u5b9a\u6bd4\u4f8b\u7684\u6301\u4ed3\uff0c\u53d6\u503c\u4ecb\u4e8e0\u4e0e1\u4e4b\u95f4\u3002\u5982\u679c\u4e0evol\u540c\u65f6\u63d0\u4f9b\uff0c\u6b64\u53c2\u6570\u5c06\u88ab\u5ffd\u7565\u3002\u8bf7\u81ea\u884c\u4fdd\u8bc1\u6309\u6bd4\u4f8b\u6362\u7b97\u540e\u7684\u5356\u51fa\u6570\u636e\u662f\u7b26\u5408\u8981\u6c42\u7684\uff08\u6bd4\u5982\u4e0d\u4e3a100\u7684\u500d\u6570\uff0c\u4f46\u6709\u4e9b\u60c5\u51b5\u4e0b\u8fd9\u662f\u5141\u8bb8\u7684\uff0c\u6240\u4ee5\u7a0b\u5e8f\u8fd9\u91cc\u65e0\u6cd5\u5e2e\u4f60\u5224\u65ad\uff09 order_time: \u4ec5\u5728\u56de\u6d4b\u6a21\u5f0f\u4e0b\u9700\u8981\u63d0\u4f9b\u3002\u5b9e\u76d8\u6a21\u5f0f\u4e0b\uff0c\u6b64\u53c2\u6570\u81ea\u52a8\u88ab\u5ffd\u7565 Returns: Union[List, Dict]: \u6210\u4ea4\u8fd4\u56de\uff0c\u8be6\u89c1traderclient\u4e2d\u7684`buy`\u65b9\u6cd5\uff0ctrade server\u53ea\u8fd4\u56de\u4e00\u4e2a\u59d4\u6258\u5355\u4fe1\u606f \"\"\" logger . 
info ( \"sell order: %s , %s , %s , %s \" , sec , f \" { price : .2f } \" if price is not None else None , f \" { vol : .0f } \" if vol is not None else None , f \" { percent : .2% } \" if percent is not None else None , date = order_time , ) if vol is None and percent is None : raise ValueError ( \"either vol or percent must be presented\" ) if vol is None : if price is None : price = await self . broker . _get_market_sell_price ( sec , order_time = order_time ) # there's no market_sell_percent API in traderclient return self . broker . sell_percent ( sec , price , percent , order_time = order_time ) # type: ignore else : if price is None : return self . broker . market_sell ( sec , vol , order_time = order_time ) else : return self . broker . sell ( sec , price , vol , order_time = order_time ) async def filter_paused_stock ( self , buylist : List [ str ], dt : datetime . date ): secs = await Security . select ( dt ) . eval () in_trading = jq . get_price ( secs , fields = [ \"paused\" ], start_date = dt , end_date = dt , skip_paused = True )[ \"code\" ] . to_numpy () return np . intersect1d ( buylist , in_trading ) async def predict ( self , frame : Frame , frame_type : FrameType , i : int , barss : Dict [ str , BarsArray ], ** kwargs , ): \"\"\"\u7b56\u7565\u8bc4\u4f30\u51fd\u6570\u3002\u5728\u6b64\u51fd\u6570\u4e2d\u5b9e\u73b0\u4ea4\u6613\u4fe1\u53f7\u68c0\u6d4b\u548c\u5904\u7406\u3002 Args: frame: \u5f53\u524d\u65f6\u95f4\u5e27 frame_type: \u5904\u7406\u7684\u6570\u636e\u4e3b\u5468\u671f i: \u5f53\u524d\u65f6\u95f4\u79bb\u56de\u6d4b\u8d77\u59cb\u7684\u5355\u4f4d\u6570 barss: \u5982\u679c\u8c03\u7528`backtest`\u65f6\u4f20\u5165\u4e86`portfolio`\u53ca`min_bars`\u53c2\u6570\uff0c\u5219`backtest`\u5c06\u4f1a\u5728\u56de\u6d4b\u4e4b\u524d\uff0c\u9884\u53d6\u4ece[start - min_bars * frame_type, end]\u95f4\u7684portfolio\u884c\u60c5\u6570\u636e\uff0c\u5e76\u5728\u6bcf\u6b21\u8c03\u7528`predict`\u65b9\u6cd5\u65f6\uff0c\u901a\u8fc7`barss`\u53c2\u6570\uff0c\u5c06[start - min_bars * frame_type, start + i * frame_type]\u95f4\u7684\u6570\u636e\u4f20\u7ed9`predict`\u65b9\u6cd5\u3002\u4f20\u5165\u7684\u6570\u636e\u5df2\u8fdb\u884c\u524d\u590d\u6743\u3002 Keyword Args: \u5728`backtest`\u65b9\u6cd5\u4e2d\u7684\u4f20\u5165\u7684kwargs\u53c2\u6570\u5c06\u88ab\u900f\u4f20\u5230\u6b64\u65b9\u6cd5\u4e2d\u3002 \"\"\" raise NotImplementedError async def plot_metrics ( self , indicator : Union [ pd . DataFrame , List [ Tuple ], None ] = None ): \"\"\"\u7b56\u7565\u56de\u6d4b\u62a5\u544a Args: indicator: \u56de\u6d4b\u65f6\u4f7f\u7528\u7684\u6307\u6807\u3002\u5982\u679c\u5b58\u5728\uff0c\u5c06\u53e0\u52a0\u5230\u7b56\u7565\u56de\u6d4b\u56fe\u4e0a\u3002\u5b83\u5e94\u8be5\u662f\u4e00\u4e2a\u4ee5\u65e5\u671f\u4e3a\u7d22\u5f15\uff0c\u6307\u6807\u5217\u540d\u4e3a\"value\"\u7684DataFrame \"\"\" if self . bills is None or self . metrics is None : raise ValueError ( \"Please run `start_backtest` first.\" ) if isinstance ( indicator , list ): assert len ( indicator [ 0 ]) == 2 indicator = pd . DataFrame ( indicator , columns = [ \"date\" , \"value\" ]) indicator . set_index ( \"date\" , inplace = True ) if self . _baseline is not None : mg = MetricsGraph ( self . bills , self . metrics , baseline_code = self . _baseline , indicator = indicator , ) else : mg = MetricsGraph ( self . bills , self . metrics , indicator = indicator ) await mg . 
plot ()","title":"BaseStrategy"},{"location":"api/strategy/#omicron.strategy.base.BaseStrategy.cash","text":"\u8fd4\u56de\u5f53\u524d\u53ef\u7528\u73b0\u91d1","title":"cash"},{"location":"api/strategy/#omicron.strategy.base.BaseStrategy.__init__","text":"\u6784\u9020\u51fd\u6570 Parameters: Name Type Description Default url str \u5b9e\u76d8/\u56de\u6d4b\u670d\u52a1\u5668\u7684\u5730\u5740\u3002 required start Union[datetime.date, datetime.datetime] \u56de\u6d4b\u8d77\u59cb\u65e5\u671f\u3002\u56de\u6d4b\u6a21\u5f0f\u4e0b\u5fc5\u987b\u4f20\u5165\u3002 None end Union[datetime.date, datetime.datetime] \u56de\u6d4b\u7ed3\u675f\u65e5\u671f\u3002\u56de\u6d4b\u6a21\u5f0f\u4e0b\u5fc5\u987b\u4f20\u5165\u3002 None account Optional[str] \u5b9e\u76d8/\u56de\u6d4b\u8d26\u53f7\u3002\u5b9e\u76d8\u6a21\u5f0f\u4e0b\u5fc5\u987b\u4f20\u5165\u3002\u5728\u56de\u6d4b\u6a21\u5f0f\u4e0b\uff0c\u5982\u679c\u672a\u4f20\u5165\uff0c\u5c06\u4ee5\u7b56\u7565\u540d+\u968f\u673a\u5b57\u7b26\u6784\u5efa\u8d26\u53f7\u3002 None token Optional[str] \u5b9e\u76d8/\u56de\u6d4b\u65f6\u7528\u7684token\u3002\u5b9e\u76d8\u6a21\u5f0f\u4e0b\u5fc5\u987b\u4f20\u5165\u3002\u5728\u56de\u6d4b\u6a21\u5f0f\u4e0b\uff0c\u5982\u679c\u672a\u4f20\u5165\uff0c\u5c06\u81ea\u52a8\u751f\u6210\u3002 None is_backtest bool \u662f\u5426\u4e3a\u56de\u6d4b\u6a21\u5f0f\u3002 True name Optional[str] \u7b56\u7565\u540d\u3002\u5982\u679c\u4e0d\u4f20\u5165\uff0c\u5219\u4f7f\u7528\u7c7b\u540d\u5b57\u5c0f\u5199 None ver Optional[str] \u7b56\u7565\u7248\u672c\u53f7\u3002\u5982\u679c\u4e0d\u4f20\u5165\uff0c\u5219\u9ed8\u8ba4\u4e3a0.1. None start Union[datetime.date, datetime.datetime] \u5982\u679c\u662f\u56de\u6d4b\u6a21\u5f0f\uff0c\u5219\u9700\u8981\u63d0\u4f9b\u56de\u6d4b\u8d77\u59cb\u65f6\u95f4 None end Union[datetime.date, datetime.datetime] \u5982\u679c\u662f\u56de\u6d4b\u6a21\u5f0f\uff0c\u5219\u9700\u8981\u63d0\u4f9b\u56de\u6d4b\u7ed3\u675f\u65f6\u95f4 None frame_type Optional[coretypes.types.FrameType] \u5982\u679c\u662f\u56de\u6d4b\u6a21\u5f0f\uff0c\u5219\u9700\u8981\u63d0\u4f9b\u56de\u6d4b\u65f6\u4f7f\u7528\u7684\u4e3b\u5468\u671f None baseline Optional[str] \u5982\u679c\u662f\u56de\u6d4b\u6a21\u5f0f\uff0c\u5219\u53ef\u4ee5\u63d0\u4f9b\u6b64\u53c2\u6570\u4f5c\u4e3a\u56de\u6d4b\u57fa\u51c6 '399300.XSHE' Source code in omicron/strategy/base.py def __init__ ( self , url : str , account : Optional [ str ] = None , token : Optional [ str ] = None , name : Optional [ str ] = None , ver : Optional [ str ] = None , is_backtest : bool = True , start : Optional [ Frame ] = None , end : Optional [ Frame ] = None , frame_type : Optional [ FrameType ] = None , baseline : Optional [ str ] = \"399300.XSHE\" , ): \"\"\"\u6784\u9020\u51fd\u6570 Args: url: \u5b9e\u76d8/\u56de\u6d4b\u670d\u52a1\u5668\u7684\u5730\u5740\u3002 start: \u56de\u6d4b\u8d77\u59cb\u65e5\u671f\u3002\u56de\u6d4b\u6a21\u5f0f\u4e0b\u5fc5\u987b\u4f20\u5165\u3002 end: \u56de\u6d4b\u7ed3\u675f\u65e5\u671f\u3002\u56de\u6d4b\u6a21\u5f0f\u4e0b\u5fc5\u987b\u4f20\u5165\u3002 account: \u5b9e\u76d8/\u56de\u6d4b\u8d26\u53f7\u3002\u5b9e\u76d8\u6a21\u5f0f\u4e0b\u5fc5\u987b\u4f20\u5165\u3002\u5728\u56de\u6d4b\u6a21\u5f0f\u4e0b\uff0c\u5982\u679c\u672a\u4f20\u5165\uff0c\u5c06\u4ee5\u7b56\u7565\u540d+\u968f\u673a\u5b57\u7b26\u6784\u5efa\u8d26\u53f7\u3002 token: \u5b9e\u76d8/\u56de\u6d4b\u65f6\u7528\u7684token\u3002\u5b9e\u76d8\u6a21\u5f0f\u4e0b\u5fc5\u987b\u4f20\u5165\u3002\u5728\u56de\u6d4b\u6a21\u5f0f\u4e0b\uff0c\u5982\u679c\u672a\u4f20\u5165\uff0c\u5c06\u81ea\u52a8\u751f\u6210\u3002 is_backtest: 
\u662f\u5426\u4e3a\u56de\u6d4b\u6a21\u5f0f\u3002 name: \u7b56\u7565\u540d\u3002\u5982\u679c\u4e0d\u4f20\u5165\uff0c\u5219\u4f7f\u7528\u7c7b\u540d\u5b57\u5c0f\u5199 ver: \u7b56\u7565\u7248\u672c\u53f7\u3002\u5982\u679c\u4e0d\u4f20\u5165\uff0c\u5219\u9ed8\u8ba4\u4e3a0.1. start: \u5982\u679c\u662f\u56de\u6d4b\u6a21\u5f0f\uff0c\u5219\u9700\u8981\u63d0\u4f9b\u56de\u6d4b\u8d77\u59cb\u65f6\u95f4 end: \u5982\u679c\u662f\u56de\u6d4b\u6a21\u5f0f\uff0c\u5219\u9700\u8981\u63d0\u4f9b\u56de\u6d4b\u7ed3\u675f\u65f6\u95f4 frame_type: \u5982\u679c\u662f\u56de\u6d4b\u6a21\u5f0f\uff0c\u5219\u9700\u8981\u63d0\u4f9b\u56de\u6d4b\u65f6\u4f7f\u7528\u7684\u4e3b\u5468\u671f baseline: \u5982\u679c\u662f\u56de\u6d4b\u6a21\u5f0f\uff0c\u5219\u53ef\u4ee5\u63d0\u4f9b\u6b64\u53c2\u6570\u4f5c\u4e3a\u56de\u6d4b\u57fa\u51c6 \"\"\" self . ver = ver or \"0.1\" self . name = name or self . __class__ . __name__ . lower () + f \"_v { self . ver } \" self . token = token or uuid . uuid4 () . hex self . account = account or f \"smallcap- { self . token [ - 4 :] } \" self . url = url if is_backtest : if start is None or end is None or frame_type is None : raise ValueError ( \"start, end and frame_type must be presented.\" ) self . bs = BacktestState ( start , end , None , 0 ) self . bills = None self . metrics = None self . _frame_type = frame_type self . broker = TraderClient ( url , self . account , self . token , is_backtest = True , start = self . bs . start , end = self . bs . end , ) self . _baseline = baseline else : if account is None or token is None : raise ValueError ( \"account and token must be presented.\" ) self . broker = TraderClient ( url , self . account , self . token , is_backtest = False )","title":"__init__()"},{"location":"api/strategy/#omicron.strategy.base.BaseStrategy.available_shares","text":"\u8fd4\u56de\u7ed9\u5b9a\u80a1\u7968\u5728 dt \u65e5\u7684\u53ef\u552e\u80a1\u6570 Parameters: Name Type Description Default sec str \u8bc1\u5238\u4ee3\u7801 required dt Union[datetime.date, datetime.datetime] \u65e5\u671f\uff0c\u5728\u5b9e\u76d8\u4e2d\u65e0\u610f\u4e49\uff0c\u53ea\u80fd\u8fd4\u56de\u6700\u65b0\u6570\u636e\uff1b\u5728\u56de\u6d4b\u65f6\uff0c\u5fc5\u987b\u6307\u5b9a\u65e5\u671f\uff0c\u4e14\u8fd4\u56de\u6307\u5b9a\u65e5\u671f\u4e0b\u7684\u6301\u4ed3\u3002 None Source code in omicron/strategy/base.py def available_shares ( self , sec : str , dt : Optional [ Frame ] = None ): \"\"\"\u8fd4\u56de\u7ed9\u5b9a\u80a1\u7968\u5728`dt`\u65e5\u7684\u53ef\u552e\u80a1\u6570 Args: sec: \u8bc1\u5238\u4ee3\u7801 dt: \u65e5\u671f\uff0c\u5728\u5b9e\u76d8\u4e2d\u65e0\u610f\u4e49\uff0c\u53ea\u80fd\u8fd4\u56de\u6700\u65b0\u6570\u636e\uff1b\u5728\u56de\u6d4b\u65f6\uff0c\u5fc5\u987b\u6307\u5b9a\u65e5\u671f\uff0c\u4e14\u8fd4\u56de\u6307\u5b9a\u65e5\u671f\u4e0b\u7684\u6301\u4ed3\u3002 \"\"\" return self . broker . 
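Building on the constructor above, here is a hedged sketch of a minimal `BaseStrategy` subclass in backtest mode. The server URL is a placeholder, the `FrameType` import follows the `coretypes.types.FrameType` reference in the parameter table, and constructing the strategy also creates a `TraderClient`, so a reachable backtest server is assumed:

```python
import datetime

from coretypes import FrameType  # assumption: FrameType is importable from coretypes
from omicron.strategy.base import BaseStrategy  # path per "Source code in omicron/strategy/base.py"


class MyStrategy(BaseStrategy):
    async def predict(self, frame, frame_type, i, barss, **kwargs):
        # Signal detection goes here; see the `predict` docs above.
        ...


def build_strategy() -> MyStrategy:
    # In backtest mode start/end/frame_type are mandatory; account and token
    # are generated automatically when omitted.
    return MyStrategy(
        url="http://localhost:7080/backtest/api/trade/v0.2",  # hypothetical address
        start=datetime.date(2023, 1, 4),
        end=datetime.date(2023, 3, 31),
        frame_type=FrameType.DAY,
        is_backtest=True,
    )
```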
available_shares ( sec , dt )","title":"available_shares()"},{"location":"api/strategy/#omicron.strategy.base.BaseStrategy.backtest","text":"\u6267\u884c\u56de\u6d4b Parameters: Name Type Description Default stop_on_error bool \u5982\u679c\u4e3aTrue\uff0c\u5219\u53d1\u751f\u5f02\u5e38\u65f6\uff0c\u5c06\u505c\u6b62\u56de\u6d4b\u3002\u5426\u5219\u5ffd\u7565\u9519\u8bef\uff0c\u7ee7\u7eed\u6267\u884c\u3002 True Keyword arguments: Name Type Description portfolio Dict[str, BarsArray] \u4ee3\u7801\u5217\u8868\u3002\u5728\u8be5\u5217\u8868\u4e2d\u7684\u54c1\u79cd\uff0c\u5c06\u5728\u56de\u6d4b\u4e4b\u524d\u81ea\u52a8\u9884\u53d6\u884c\u60c5\u6570\u636e\uff0c\u5e76\u5728\u8c03\u7528predict\u65f6\uff0c\u4f20\u5165\u622a\u6b62\u5230\u5f53\u524dframe\u7684\uff0c\u957f\u5ea6\u4e3an\u7684\u884c\u60c5\u6570\u636e\u3002\u884c\u60c5\u5468\u671f\u7531\u6784\u9020\u65f6\u7684frame_type\u6307\u5b9a min_bars int \u56de\u6d4b\u65f6\u5fc5\u8981\u7684bars\u7684\u6700\u5c0f\u503c Source code in omicron/strategy/base.py async def backtest ( self , stop_on_error : bool = True , ** kwargs ): \"\"\"\u6267\u884c\u56de\u6d4b Args: stop_on_error: \u5982\u679c\u4e3aTrue\uff0c\u5219\u53d1\u751f\u5f02\u5e38\u65f6\uff0c\u5c06\u505c\u6b62\u56de\u6d4b\u3002\u5426\u5219\u5ffd\u7565\u9519\u8bef\uff0c\u7ee7\u7eed\u6267\u884c\u3002 Keyword Args: portfolio Dict[str, BarsArray]: \u4ee3\u7801\u5217\u8868\u3002\u5728\u8be5\u5217\u8868\u4e2d\u7684\u54c1\u79cd\uff0c\u5c06\u5728\u56de\u6d4b\u4e4b\u524d\u81ea\u52a8\u9884\u53d6\u884c\u60c5\u6570\u636e\uff0c\u5e76\u5728\u8c03\u7528predict\u65f6\uff0c\u4f20\u5165\u622a\u6b62\u5230\u5f53\u524dframe\u7684\uff0c\u957f\u5ea6\u4e3an\u7684\u884c\u60c5\u6570\u636e\u3002\u884c\u60c5\u5468\u671f\u7531\u6784\u9020\u65f6\u7684frame_type\u6307\u5b9a min_bars int: \u56de\u6d4b\u65f6\u5fc5\u8981\u7684bars\u7684\u6700\u5c0f\u503c \"\"\" portfolio : List [ str ] = kwargs . get ( \"portfolio\" ) # type: ignore n = kwargs . get ( \"min_bars\" , 0 ) await self . _cache_bars_for_backtest ( portfolio , n ) self . bs . cursor = n converter = ( tf . int2date if self . _frame_type in tf . day_level_frames else tf . int2time ) for i , frame in enumerate ( tf . get_frames ( self . bs . start , self . bs . end , self . _frame_type ) # type: ignore ): barss = self . _next () logger . debug ( \" %s th iteration\" , i , date = converter ( frame )) try : await self . predict ( converter ( frame ), self . _frame_type , i , barss = barss , ** kwargs # type: ignore ) except Exception as e : logger . exception ( e ) if stop_on_error : raise e self . broker . stop_backtest () self . bills = self . broker . bills () self . metrics = self . broker . metrics ( baseline = self . 
_baseline )","title":"backtest()"},{"location":"api/strategy/#omicron.strategy.base.BaseStrategy.buy","text":"\u4e70\u5165\u80a1\u7968 Parameters: Name Type Description Default sec str \u8bc1\u5238\u4ee3\u7801 required price Optional[float] \u59d4\u4e70\u4ef7\u3002\u5982\u679c\u4e3aNone\uff0c\u5219\u81ea\u52a8\u8f6c\u5e02\u4ef7\u4e70\u5165\u3002 None vol Optional[int] \u59d4\u4e70\u80a1\u6570\u3002\u8bf7\u81ea\u884c\u4fdd\u8bc1\u4e3a100\u7684\u6574\u6570\u3002\u5982\u679c\u4e3aNone, \u5219money\u5fc5\u987b\u4f20\u5165\u3002 None money Optional[float] \u59d4\u4e70\u91d1\u989d\u3002\u5982\u679c\u540c\u65f6\u4f20\u5165\u4e86vol\uff0c\u5219\u6b64\u53c2\u6570\u81ea\u52a8\u5ffd\u7565 None order_time Optional[datetime.datetime] \u4ec5\u5728\u56de\u6d4b\u6a21\u5f0f\u4e0b\u9700\u8981\u63d0\u4f9b\u3002\u5b9e\u76d8\u6a21\u5f0f\u4e0b\uff0c\u6b64\u53c2\u6570\u81ea\u52a8\u88ab\u5ffd\u7565 None Returns: Type Description Dict \u89c1traderclient\u4e2d\u7684 buy \u65b9\u6cd5\u3002 Source code in omicron/strategy/base.py async def buy ( self , sec : str , price : Optional [ float ] = None , vol : Optional [ int ] = None , money : Optional [ float ] = None , order_time : Optional [ datetime . datetime ] = None , ) -> Dict : \"\"\"\u4e70\u5165\u80a1\u7968 Args: sec: \u8bc1\u5238\u4ee3\u7801 price: \u59d4\u4e70\u4ef7\u3002\u5982\u679c\u4e3aNone\uff0c\u5219\u81ea\u52a8\u8f6c\u5e02\u4ef7\u4e70\u5165\u3002 vol: \u59d4\u4e70\u80a1\u6570\u3002\u8bf7\u81ea\u884c\u4fdd\u8bc1\u4e3a100\u7684\u6574\u6570\u3002\u5982\u679c\u4e3aNone, \u5219money\u5fc5\u987b\u4f20\u5165\u3002 money: \u59d4\u4e70\u91d1\u989d\u3002\u5982\u679c\u540c\u65f6\u4f20\u5165\u4e86vol\uff0c\u5219\u6b64\u53c2\u6570\u81ea\u52a8\u5ffd\u7565 order_time: \u4ec5\u5728\u56de\u6d4b\u6a21\u5f0f\u4e0b\u9700\u8981\u63d0\u4f9b\u3002\u5b9e\u76d8\u6a21\u5f0f\u4e0b\uff0c\u6b64\u53c2\u6570\u81ea\u52a8\u88ab\u5ffd\u7565 Returns: \u89c1traderclient\u4e2d\u7684`buy`\u65b9\u6cd5\u3002 \"\"\" logger . info ( \"buy order: %s , %s , %s , %s \" , sec , f \" { price : .2f } \" if price is not None else None , f \" { vol : .0f } \" if vol is not None else None , f \" { money : .0f } \" if money is not None else None , date = order_time , ) if vol is None : if money is None : raise ValueError ( \"parameter `mnoey` must be presented!\" ) return await self . broker . buy_by_money ( sec , money , price , order_time = order_time ) elif price is None : return self . broker . market_buy ( sec , vol , order_time = order_time ) else : return self . broker . buy ( sec , price , vol , order_time = order_time )","title":"buy()"},{"location":"api/strategy/#omicron.strategy.base.BaseStrategy.peek","text":"\u5141\u8bb8\u7b56\u7565\u5077\u770b\u672a\u6765\u6570\u636e \u53ef\u7528\u4ee5\u56e0\u5b50\u68c0\u9a8c\u573a\u666f\u3002\u8981\u6c42\u6570\u636e\u672c\u8eab\u5df2\u7f13\u5b58\u3002\u5426\u5219\u8bf7\u7528Stock.get_bars\u7b49\u65b9\u6cd5\u83b7\u53d6\u3002 Source code in omicron/strategy/base.py async def peek ( self , code : str , n : int ): \"\"\"\u5141\u8bb8\u7b56\u7565\u5077\u770b\u672a\u6765\u6570\u636e \u53ef\u7528\u4ee5\u56e0\u5b50\u68c0\u9a8c\u573a\u666f\u3002\u8981\u6c42\u6570\u636e\u672c\u8eab\u5df2\u7f13\u5b58\u3002\u5426\u5219\u8bf7\u7528Stock.get_bars\u7b49\u65b9\u6cd5\u83b7\u53d6\u3002 \"\"\" if self . bs is None or self . bs . barss is None : raise ValueError ( \"data is not cached\" ) if code in self . bs . barss : if self . bs . cursor + n + 1 < len ( self . bs . barss [ code ]): return Stock . qfq ( self . bs . barss [ code ][ self . bs . cursor : self . bs . 
cursor + n ] ) else : raise ValueError ( \"data is not cached\" )","title":"peek()"},{"location":"api/strategy/#omicron.strategy.base.BaseStrategy.plot_metrics","text":"\u7b56\u7565\u56de\u6d4b\u62a5\u544a Parameters: Name Type Description Default indicator Union[pandas.core.frame.DataFrame, List[Tuple]] \u56de\u6d4b\u65f6\u4f7f\u7528\u7684\u6307\u6807\u3002\u5982\u679c\u5b58\u5728\uff0c\u5c06\u53e0\u52a0\u5230\u7b56\u7565\u56de\u6d4b\u56fe\u4e0a\u3002\u5b83\u5e94\u8be5\u662f\u4e00\u4e2a\u4ee5\u65e5\u671f\u4e3a\u7d22\u5f15\uff0c\u6307\u6807\u5217\u540d\u4e3a\"value\"\u7684DataFrame None Source code in omicron/strategy/base.py async def plot_metrics ( self , indicator : Union [ pd . DataFrame , List [ Tuple ], None ] = None ): \"\"\"\u7b56\u7565\u56de\u6d4b\u62a5\u544a Args: indicator: \u56de\u6d4b\u65f6\u4f7f\u7528\u7684\u6307\u6807\u3002\u5982\u679c\u5b58\u5728\uff0c\u5c06\u53e0\u52a0\u5230\u7b56\u7565\u56de\u6d4b\u56fe\u4e0a\u3002\u5b83\u5e94\u8be5\u662f\u4e00\u4e2a\u4ee5\u65e5\u671f\u4e3a\u7d22\u5f15\uff0c\u6307\u6807\u5217\u540d\u4e3a\"value\"\u7684DataFrame \"\"\" if self . bills is None or self . metrics is None : raise ValueError ( \"Please run `start_backtest` first.\" ) if isinstance ( indicator , list ): assert len ( indicator [ 0 ]) == 2 indicator = pd . DataFrame ( indicator , columns = [ \"date\" , \"value\" ]) indicator . set_index ( \"date\" , inplace = True ) if self . _baseline is not None : mg = MetricsGraph ( self . bills , self . metrics , baseline_code = self . _baseline , indicator = indicator , ) else : mg = MetricsGraph ( self . bills , self . metrics , indicator = indicator ) await mg . plot ()","title":"plot_metrics()"},{"location":"api/strategy/#omicron.strategy.base.BaseStrategy.positions","text":"\u8fd4\u56de\u5f53\u524d\u6301\u4ed3 Source code in omicron/strategy/base.py def positions ( self , dt : Optional [ datetime . date ] = None ): \"\"\"\u8fd4\u56de\u5f53\u524d\u6301\u4ed3\"\"\" return self . broker . 
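`peek` above slices the cached bars beyond the current cursor and applies `Stock.qfq`, so it only works for codes listed in `portfolio`. A hedged sketch of using it for factor validation inside `predict` (the security code is a placeholder):

```python
from omicron.strategy.base import BaseStrategy  # assumed import path, as above


class PeekingStrategy(BaseStrategy):
    async def predict(self, frame, frame_type, i, barss, **kwargs):
        try:
            # Look two frames ahead of the backtest cursor; only valid when the
            # code was included in `portfolio` and enough bars remain cached.
            future_bars = await self.peek("600000.XSHG", 2)
        except ValueError:
            future_bars = None  # raised as "data is not cached"
        # Use `future_bars` for factor validation only, never for live signals.
```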
positions ( dt )","title":"positions()"},{"location":"api/strategy/#omicron.strategy.base.BaseStrategy.predict","text":"\u7b56\u7565\u8bc4\u4f30\u51fd\u6570\u3002\u5728\u6b64\u51fd\u6570\u4e2d\u5b9e\u73b0\u4ea4\u6613\u4fe1\u53f7\u68c0\u6d4b\u548c\u5904\u7406\u3002 Parameters: Name Type Description Default frame Union[datetime.date, datetime.datetime] \u5f53\u524d\u65f6\u95f4\u5e27 required frame_type FrameType \u5904\u7406\u7684\u6570\u636e\u4e3b\u5468\u671f required i int \u5f53\u524d\u65f6\u95f4\u79bb\u56de\u6d4b\u8d77\u59cb\u7684\u5355\u4f4d\u6570 required barss Dict[str, numpy.ndarray[Any, numpy.dtype[dtype([('frame', ' Union [ List , Dict ]: \"\"\"\u5356\u51fa\u80a1\u7968 Args: sec: \u8bc1\u5238\u4ee3\u7801 price: \u59d4\u5356\u4ef7\uff0c\u5982\u679c\u672a\u63d0\u4f9b\uff0c\u5219\u8f6c\u4e3a\u5e02\u4ef7\u5355 vol: \u59d4\u5356\u80a1\u6570\u3002\u5982\u679c\u4e3aNone\uff0c\u5219percent\u5fc5\u987b\u4f20\u5165 percent: \u5356\u51fa\u4e00\u5b9a\u6bd4\u4f8b\u7684\u6301\u4ed3\uff0c\u53d6\u503c\u4ecb\u4e8e0\u4e0e1\u4e4b\u95f4\u3002\u5982\u679c\u4e0evol\u540c\u65f6\u63d0\u4f9b\uff0c\u6b64\u53c2\u6570\u5c06\u88ab\u5ffd\u7565\u3002\u8bf7\u81ea\u884c\u4fdd\u8bc1\u6309\u6bd4\u4f8b\u6362\u7b97\u540e\u7684\u5356\u51fa\u6570\u636e\u662f\u7b26\u5408\u8981\u6c42\u7684\uff08\u6bd4\u5982\u4e0d\u4e3a100\u7684\u500d\u6570\uff0c\u4f46\u6709\u4e9b\u60c5\u51b5\u4e0b\u8fd9\u662f\u5141\u8bb8\u7684\uff0c\u6240\u4ee5\u7a0b\u5e8f\u8fd9\u91cc\u65e0\u6cd5\u5e2e\u4f60\u5224\u65ad\uff09 order_time: \u4ec5\u5728\u56de\u6d4b\u6a21\u5f0f\u4e0b\u9700\u8981\u63d0\u4f9b\u3002\u5b9e\u76d8\u6a21\u5f0f\u4e0b\uff0c\u6b64\u53c2\u6570\u81ea\u52a8\u88ab\u5ffd\u7565 Returns: Union[List, Dict]: \u6210\u4ea4\u8fd4\u56de\uff0c\u8be6\u89c1traderclient\u4e2d\u7684`buy`\u65b9\u6cd5\uff0ctrade server\u53ea\u8fd4\u56de\u4e00\u4e2a\u59d4\u6258\u5355\u4fe1\u606f \"\"\" logger . info ( \"sell order: %s , %s , %s , %s \" , sec , f \" { price : .2f } \" if price is not None else None , f \" { vol : .0f } \" if vol is not None else None , f \" { percent : .2% } \" if percent is not None else None , date = order_time , ) if vol is None and percent is None : raise ValueError ( \"either vol or percent must be presented\" ) if vol is None : if price is None : price = await self . broker . _get_market_sell_price ( sec , order_time = order_time ) # there's no market_sell_percent API in traderclient return self . broker . sell_percent ( sec , price , percent , order_time = order_time ) # type: ignore else : if price is None : return self . broker . market_sell ( sec , vol , order_time = order_time ) else : return self . broker . sell ( sec , price , vol , order_time = order_time )","title":"sell()"},{"location":"api/strategy/#omicron.strategy.sma","text":"","title":"sma"},{"location":"api/strategy/#omicron.strategy.sma.SMAStrategy","text":"Source code in omicron/strategy/sma.py class SMAStrategy ( BaseStrategy ): def __init__ ( self , sec : str , n_short : int = 5 , n_long : int = 10 , * args , ** kwargs ): self . _sec = sec self . _n_short = n_short self . _n_long = n_long self . indicators = [] super () . __init__ ( * args , ** kwargs ) async def predict ( self , frame : Frame , frame_type : FrameType , i : int , barss , ** kwargs ): if barss is None : raise ValueError ( \"please specify `portfolio` and `min_bars`\" ) bars : Union [ BarsArray , None ] = barss . get ( self . _sec ) if bars is None : raise ValueError ( f \" { self . _sec } not found in `portfolio`\" ) ma_short = np . mean ( bars [ \"close\" ][ - self . _n_short :]) ma_long = np . 
mean ( bars [ \"close\" ][ - self . _n_long :]) if ma_short > ma_long : self . indicators . append (( frame , 1 )) if self . cash >= 100 * bars [ \"close\" ][ - 1 ]: await self . buy ( self . _sec , money = self . cash , order_time = tf . combine_time ( frame , 14 , 55 ), ) elif ma_short < ma_long : self . indicators . append (( frame , - 1 )) if self . available_shares ( self . _sec , frame ) > 0 : await self . sell ( self . _sec , percent = 1.0 , order_time = tf . combine_time ( frame , 14 , 55 ) )","title":"SMAStrategy"},{"location":"api/strategy/#omicron.strategy.sma.SMAStrategy.predict","text":"\u7b56\u7565\u8bc4\u4f30\u51fd\u6570\u3002\u5728\u6b64\u51fd\u6570\u4e2d\u5b9e\u73b0\u4ea4\u6613\u4fe1\u53f7\u68c0\u6d4b\u548c\u5904\u7406\u3002 Parameters: Name Type Description Default frame Union[datetime.date, datetime.datetime] \u5f53\u524d\u65f6\u95f4\u5e27 required frame_type FrameType \u5904\u7406\u7684\u6570\u636e\u4e3b\u5468\u671f required i int \u5f53\u524d\u65f6\u95f4\u79bb\u56de\u6d4b\u8d77\u59cb\u7684\u5355\u4f4d\u6570 required barss \u5982\u679c\u8c03\u7528 backtest \u65f6\u4f20\u5165\u4e86 portfolio \u53ca min_bars \u53c2\u6570\uff0c\u5219 backtest \u5c06\u4f1a\u5728\u56de\u6d4b\u4e4b\u524d\uff0c\u9884\u53d6\u4ece[start - min_bars * frame_type, end]\u95f4\u7684portfolio\u884c\u60c5\u6570\u636e\uff0c\u5e76\u5728\u6bcf\u6b21\u8c03\u7528 predict \u65b9\u6cd5\u65f6\uff0c\u901a\u8fc7 barss \u53c2\u6570\uff0c\u5c06[start - min_bars * frame_type, start + i * frame_type]\u95f4\u7684\u6570\u636e\u4f20\u7ed9 predict \u65b9\u6cd5\u3002\u4f20\u5165\u7684\u6570\u636e\u5df2\u8fdb\u884c\u524d\u590d\u6743\u3002 required Keyword Args: \u5728 backtest \u65b9\u6cd5\u4e2d\u7684\u4f20\u5165\u7684kwargs\u53c2\u6570\u5c06\u88ab\u900f\u4f20\u5230\u6b64\u65b9\u6cd5\u4e2d\u3002 Source code in omicron/strategy/sma.py async def predict ( self , frame : Frame , frame_type : FrameType , i : int , barss , ** kwargs ): if barss is None : raise ValueError ( \"please specify `portfolio` and `min_bars`\" ) bars : Union [ BarsArray , None ] = barss . get ( self . _sec ) if bars is None : raise ValueError ( f \" { self . _sec } not found in `portfolio`\" ) ma_short = np . mean ( bars [ \"close\" ][ - self . _n_short :]) ma_long = np . mean ( bars [ \"close\" ][ - self . _n_long :]) if ma_short > ma_long : self . indicators . append (( frame , 1 )) if self . cash >= 100 * bars [ \"close\" ][ - 1 ]: await self . buy ( self . _sec , money = self . cash , order_time = tf . combine_time ( frame , 14 , 55 ), ) elif ma_short < ma_long : self . indicators . append (( frame , - 1 )) if self . available_shares ( self . _sec , frame ) > 0 : await self . sell ( self . _sec , percent = 1.0 , order_time = tf . combine_time ( frame , 14 , 55 ) )","title":"predict()"},{"location":"api/talib/","text":"core \u00b6 angle ( ts , threshold = 0.01 , loss_func = 're' ) \u00b6 \u6c42\u65f6\u95f4\u5e8f\u5217 ts \u62df\u5408\u76f4\u7ebf\u76f8\u5bf9\u4e8e x \u8f74\u7684\u5939\u89d2\u7684\u4f59\u5f26\u503c \u672c\u51fd\u6570\u53ef\u4ee5\u7528\u6765\u5224\u65ad\u65f6\u95f4\u5e8f\u5217\u7684\u589e\u957f\u8d8b\u52bf\u3002\u5f53 angle \u5904\u4e8e[-1, 0]\u65f6\uff0c\u8d8a\u9760\u8fd10\uff0c\u4e0b\u964d\u8d8a\u5feb\uff1b\u5f53 angle \u5904\u4e8e[0, 1]\u65f6\uff0c\u8d8a\u63a5\u8fd10\uff0c\u4e0a\u5347\u8d8a\u5feb\u3002 \u5982\u679c ts \u65e0\u6cd5\u5f88\u597d\u5730\u62df\u5408\u4e3a\u76f4\u7ebf\uff0c\u5219\u8fd4\u56de[float, None] Examples: >>> ts = np . 
array ([ i for i in range ( 5 )]) >>> round ( angle ( ts )[ 1 ], 3 ) # degree: 45, rad: pi/2 0.707 >>> ts = np . array ([ np . sqrt ( 3 ) / 3 * i for i in range ( 10 )]) >>> round ( angle ( ts )[ 1 ], 3 ) # degree: 30, rad: pi/6 0.866 >>> ts = np . array ([ - np . sqrt ( 3 ) / 3 * i for i in range ( 7 )]) >>> round ( angle ( ts )[ 1 ], 3 ) # degree: 150, rad: 5*pi/6 - 0.866 Parameters: Name Type Description Default ts required Returns: Type Description Tuple[float, float] \u8fd4\u56de (error, consine(theta))\uff0c\u5373\u62df\u5408\u8bef\u5dee\u548c\u5939\u89d2\u4f59\u5f26\u503c\u3002 Source code in omicron/talib/core.py def angle ( ts , threshold = 0.01 , loss_func = \"re\" ) -> Tuple [ float , float ]: \"\"\"\u6c42\u65f6\u95f4\u5e8f\u5217`ts`\u62df\u5408\u76f4\u7ebf\u76f8\u5bf9\u4e8e`x`\u8f74\u7684\u5939\u89d2\u7684\u4f59\u5f26\u503c \u672c\u51fd\u6570\u53ef\u4ee5\u7528\u6765\u5224\u65ad\u65f6\u95f4\u5e8f\u5217\u7684\u589e\u957f\u8d8b\u52bf\u3002\u5f53`angle`\u5904\u4e8e[-1, 0]\u65f6\uff0c\u8d8a\u9760\u8fd10\uff0c\u4e0b\u964d\u8d8a\u5feb\uff1b\u5f53`angle` \u5904\u4e8e[0, 1]\u65f6\uff0c\u8d8a\u63a5\u8fd10\uff0c\u4e0a\u5347\u8d8a\u5feb\u3002 \u5982\u679c`ts`\u65e0\u6cd5\u5f88\u597d\u5730\u62df\u5408\u4e3a\u76f4\u7ebf\uff0c\u5219\u8fd4\u56de[float, None] Examples: >>> ts = np.array([ i for i in range(5)]) >>> round(angle(ts)[1], 3) # degree: 45, rad: pi/2 0.707 >>> ts = np.array([ np.sqrt(3) / 3 * i for i in range(10)]) >>> round(angle(ts)[1],3) # degree: 30, rad: pi/6 0.866 >>> ts = np.array([ -np.sqrt(3) / 3 * i for i in range(7)]) >>> round(angle(ts)[1], 3) # degree: 150, rad: 5*pi/6 -0.866 Args: ts: Returns: \u8fd4\u56de (error, consine(theta))\uff0c\u5373\u62df\u5408\u8bef\u5dee\u548c\u5939\u89d2\u4f59\u5f26\u503c\u3002 \"\"\" err , ( a , b ) = polyfit ( ts , deg = 1 , loss_func = loss_func ) if err > threshold : return ( err , None ) v = np . array ([ 1 , a + b ]) vx = np . array ([ 1 , 0 ]) return err , copysign ( np . dot ( v , vx ) / ( norm ( v ) * norm ( vx )), a ) clustering ( numbers , n ) \u00b6 \u5c06\u6570\u7ec4 numbers \u5212\u5206\u4e3a n \u4e2a\u7c07 \u8fd4\u56de\u503c\u4e3a\u4e00\u4e2aList, \u6bcf\u4e00\u4e2a\u5143\u7d20\u4e3a\u4e00\u4e2a\u5217\u8868\uff0c\u5206\u522b\u4e3a\u7c07\u7684\u8d77\u59cb\u70b9\u548c\u957f\u5ea6\u3002 Examples: >>> numbers = np . array ([ 1 , 1 , 1 , 2 , 4 , 6 , 8 , 7 , 4 , 5 , 6 ]) >>> clustering ( numbers , 2 ) [( 0 , 4 ), ( 4 , 7 )] Returns: Type Description List[Tuple[int, int]] \u5212\u5206\u540e\u7684\u7c07\u5217\u8868\u3002 Source code in omicron/talib/core.py def clustering ( numbers : np . ndarray , n : int ) -> List [ Tuple [ int , int ]]: \"\"\"\u5c06\u6570\u7ec4`numbers`\u5212\u5206\u4e3a`n`\u4e2a\u7c07 \u8fd4\u56de\u503c\u4e3a\u4e00\u4e2aList, \u6bcf\u4e00\u4e2a\u5143\u7d20\u4e3a\u4e00\u4e2a\u5217\u8868\uff0c\u5206\u522b\u4e3a\u7c07\u7684\u8d77\u59cb\u70b9\u548c\u957f\u5ea6\u3002 Examples: >>> numbers = np.array([1,1,1,2,4,6,8,7,4,5,6]) >>> clustering(numbers, 2) [(0, 4), (4, 7)] Returns: \u5212\u5206\u540e\u7684\u7c07\u5217\u8868\u3002 \"\"\" result = ckwrap . cksegs ( numbers , n ) clusters = [] for pos , size in zip ( result . centers , result . sizes ): clusters . append (( int ( pos - size // 2 - 1 ), int ( size ))) return clusters exp_moving_average ( values , window ) \u00b6 Numpy implementation of EMA Source code in omicron/talib/core.py def exp_moving_average ( values , window ): \"\"\"Numpy implementation of EMA\"\"\" weights = np . exp ( np . linspace ( - 1.0 , 0.0 , window )) weights /= weights . sum () a = np . 
convolve ( values , weights , mode = \"full\" )[: len ( values )] a [: window ] = a [ window ] return a mean_absolute_error ( y , y_hat ) \u00b6 \u8fd4\u56de\u9884\u6d4b\u5e8f\u5217\u76f8\u5bf9\u4e8e\u771f\u503c\u5e8f\u5217\u7684\u5e73\u5747\u7edd\u5bf9\u503c\u5dee \u4e24\u4e2a\u5e8f\u5217\u5e94\u8be5\u5177\u6709\u76f8\u540c\u7684\u957f\u5ea6\u3002\u5982\u679c\u5b58\u5728nan\uff0c\u5219nan\u7684\u503c\u4e0d\u8ba1\u5165\u5e73\u5747\u503c\u3002 Examples: >>> y = np . arange ( 5 ) >>> y_hat = np . arange ( 5 ) >>> y_hat [ 4 ] = 0 >>> mean_absolute_error ( y , y ) 0.0 >>> mean_absolute_error ( y , y_hat ) 0.8 Parameters: Name Type Description Default y np.array \u771f\u503c\u5e8f\u5217 required y_hat \u6bd4\u8f83\u5e8f\u5217 required Returns: Type Description float \u5e73\u5747\u7edd\u5bf9\u503c\u5dee Source code in omicron/talib/core.py def mean_absolute_error ( y : np . array , y_hat : np . array ) -> float : \"\"\"\u8fd4\u56de\u9884\u6d4b\u5e8f\u5217\u76f8\u5bf9\u4e8e\u771f\u503c\u5e8f\u5217\u7684\u5e73\u5747\u7edd\u5bf9\u503c\u5dee \u4e24\u4e2a\u5e8f\u5217\u5e94\u8be5\u5177\u6709\u76f8\u540c\u7684\u957f\u5ea6\u3002\u5982\u679c\u5b58\u5728nan\uff0c\u5219nan\u7684\u503c\u4e0d\u8ba1\u5165\u5e73\u5747\u503c\u3002 Examples: >>> y = np.arange(5) >>> y_hat = np.arange(5) >>> y_hat[4] = 0 >>> mean_absolute_error(y, y) 0.0 >>> mean_absolute_error(y, y_hat) 0.8 Args: y (np.array): \u771f\u503c\u5e8f\u5217 y_hat: \u6bd4\u8f83\u5e8f\u5217 Returns: float: \u5e73\u5747\u7edd\u5bf9\u503c\u5dee \"\"\" return nanmean ( np . abs ( y - y_hat )) moving_average ( ts , win , padding = True ) \u00b6 \u751f\u6210ts\u5e8f\u5217\u7684\u79fb\u52a8\u5e73\u5747\u503c Examples: >>> ts = np . arange ( 7 ) >>> moving_average ( ts , 5 ) array ([ nan , nan , nan , nan , 2. , 3. , 4. ]) Parameters: Name Type Description Default ts Sequence the input array required win int the window size required padding if True, then the return will be equal length as input, padding with np.NaN at the beginning True Returns: Type Description ndarray The moving mean of the input array along the specified axis. The output has the same shape as the input. Source code in omicron/talib/core.py def moving_average ( ts : Sequence , win : int , padding = True ) -> np . ndarray : \"\"\"\u751f\u6210ts\u5e8f\u5217\u7684\u79fb\u52a8\u5e73\u5747\u503c Examples: >>> ts = np.arange(7) >>> moving_average(ts, 5) array([nan, nan, nan, nan, 2., 3., 4.]) Args: ts (Sequence): the input array win (int): the window size padding: if True, then the return will be equal length as input, padding with np.NaN at the beginning Returns: The moving mean of the input array along the specified axis. The output has the same shape as the input. \"\"\" ma = move_mean ( ts , win ) if padding : return ma else : return ma [ win - 1 :] normalize ( X , scaler = 'maxabs' ) \u00b6 \u5bf9\u6570\u636e\u8fdb\u884c\u89c4\u8303\u5316\u5904\u7406\u3002 \u5982\u679cscaler\u4e3amaxabs\uff0c\u5219X\u7684\u5404\u5143\u7d20\u88ab\u538b\u7f29\u5230[-1,1]\u4e4b\u95f4 \u5982\u679cscaler\u4e3aunit_vector\uff0c\u5219\u5c06X\u7684\u5404\u5143\u7d20\u538b\u7f29\u5230\u5355\u4f4d\u8303\u6570 \u5982\u679cscaler\u4e3aminmax,\u5219X\u7684\u5404\u5143\u7d20\u88ab\u538b\u7f29\u5230[0,1]\u4e4b\u95f4 \u5982\u679cscaler\u4e3astandard,\u5219X\u7684\u5404\u5143\u7d20\u88ab\u538b\u7f29\u5230\u5355\u4f4d\u65b9\u5dee\u4e4b\u95f4\uff0c\u4e14\u5747\u503c\u4e3a\u96f6\u3002 \u53c2\u8003 sklearn Examples: >>> X = [[ 1. , - 1. , 2. ], ... [ 2. , 0. , 0. ], ... [ 0. , 1. , - 1. 
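`exp_moving_average` above has no doctest; a short hedged comparison with `moving_average` (the import path is taken from the `omicron/talib/core.py` source location, and the helpers may also be re-exported from `omicron.talib`):

```python
import numpy as np

from omicron.talib.core import exp_moving_average, moving_average  # assumed import path

ts = np.arange(10, dtype=float)

ma = moving_average(ts, 5)       # same length as ts; the first win-1 values are NaN
ema = exp_moving_average(ts, 5)  # same length; the first `window` values are back-filled

print(ma)
print(ema)
```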
]] >>> expected = [[ 0.4082 , - 0.4082 , 0.8165 ], ... [ 1. , 0. , 0. ], ... [ 0. , 0.7071 , - 0.7071 ]] >>> X_hat = normalize ( X , scaler = 'unit_vector' ) >>> np . testing . assert_array_almost_equal ( expected , X_hat , decimal = 4 ) >>> expected = [[ 0.5 , - 1. , 1. ], ... [ 1. , 0. , 0. ], ... [ 0. , 1. , - 0.5 ]] >>> X_hat = normalize ( X , scaler = 'maxabs' ) >>> np . testing . assert_array_almost_equal ( expected , X_hat , decimal = 2 ) >>> expected = [[ 0.5 , 0. , 1. ], ... [ 1. , 0.5 , 0.33333333 ], ... [ 0. , 1. , 0. ]] >>> X_hat = normalize ( X , scaler = 'minmax' ) >>> np . testing . assert_array_almost_equal ( expected , X_hat , decimal = 3 ) >>> X = [[ 0 , 0 ], ... [ 0 , 0 ], ... [ 1 , 1 ], ... [ 1 , 1 ]] >>> expected = [[ - 1. , - 1. ], ... [ - 1. , - 1. ], ... [ 1. , 1. ], ... [ 1. , 1. ]] >>> X_hat = normalize ( X , scaler = 'standard' ) >>> np . testing . assert_array_almost_equal ( expected , X_hat , decimal = 3 ) Parameters: Name Type Description Default X 2D array required scaler str [description]. Defaults to 'maxabs_scale'. 'maxabs' Source code in omicron/talib/core.py def normalize ( X , scaler = \"maxabs\" ): \"\"\"\u5bf9\u6570\u636e\u8fdb\u884c\u89c4\u8303\u5316\u5904\u7406\u3002 \u5982\u679cscaler\u4e3amaxabs\uff0c\u5219X\u7684\u5404\u5143\u7d20\u88ab\u538b\u7f29\u5230[-1,1]\u4e4b\u95f4 \u5982\u679cscaler\u4e3aunit_vector\uff0c\u5219\u5c06X\u7684\u5404\u5143\u7d20\u538b\u7f29\u5230\u5355\u4f4d\u8303\u6570 \u5982\u679cscaler\u4e3aminmax,\u5219X\u7684\u5404\u5143\u7d20\u88ab\u538b\u7f29\u5230[0,1]\u4e4b\u95f4 \u5982\u679cscaler\u4e3astandard,\u5219X\u7684\u5404\u5143\u7d20\u88ab\u538b\u7f29\u5230\u5355\u4f4d\u65b9\u5dee\u4e4b\u95f4\uff0c\u4e14\u5747\u503c\u4e3a\u96f6\u3002 \u53c2\u8003 [sklearn] [sklearn]: https://scikit-learn.org/stable/auto_examples/preprocessing/plot_all_scaling.html#results Examples: >>> X = [[ 1., -1., 2.], ... [ 2., 0., 0.], ... [ 0., 1., -1.]] >>> expected = [[ 0.4082, -0.4082, 0.8165], ... [ 1., 0., 0.], ... [ 0., 0.7071, -0.7071]] >>> X_hat = normalize(X, scaler='unit_vector') >>> np.testing.assert_array_almost_equal(expected, X_hat, decimal=4) >>> expected = [[0.5, -1., 1.], ... [1., 0., 0.], ... [0., 1., -0.5]] >>> X_hat = normalize(X, scaler='maxabs') >>> np.testing.assert_array_almost_equal(expected, X_hat, decimal = 2) >>> expected = [[0.5 , 0. , 1. ], ... [1. , 0.5 , 0.33333333], ... [0. , 1. , 0. ]] >>> X_hat = normalize(X, scaler='minmax') >>> np.testing.assert_array_almost_equal(expected, X_hat, decimal= 3) >>> X = [[0, 0], ... [0, 0], ... [1, 1], ... [1, 1]] >>> expected = [[-1., -1.], ... [-1., -1.], ... [ 1., 1.], ... [ 1., 1.]] >>> X_hat = normalize(X, scaler='standard') >>> np.testing.assert_array_almost_equal(expected, X_hat, decimal = 3) Args: X (2D array): scaler (str, optional): [description]. Defaults to 'maxabs_scale'. \"\"\" if scaler == \"maxabs\" : return MaxAbsScaler () . fit_transform ( X ) elif scaler == \"unit_vector\" : return sklearn . preprocessing . normalize ( X , norm = \"l2\" ) elif scaler == \"minmax\" : return minmax_scale ( X ) elif scaler == \"standard\" : return StandardScaler () . fit_transform ( X ) pct_error ( y , y_hat ) \u00b6 \u76f8\u5bf9\u4e8e\u5e8f\u5217\u7b97\u672f\u5747\u503c\u7684\u8bef\u5dee\u503c Examples: >>> y = np . arange ( 5 ) >>> y_hat = np . 
arange ( 5 ) >>> y_hat [ 4 ] = 0 >>> pct_error ( y , y_hat ) 0.4 Parameters: Name Type Description Default y np.array [description] required y_hat np.array [description] required Returns: Type Description float [description] Source code in omicron/talib/core.py def pct_error ( y : np . array , y_hat : np . array ) -> float : \"\"\"\u76f8\u5bf9\u4e8e\u5e8f\u5217\u7b97\u672f\u5747\u503c\u7684\u8bef\u5dee\u503c Examples: >>> y = np.arange(5) >>> y_hat = np.arange(5) >>> y_hat[4] = 0 >>> pct_error(y, y_hat) 0.4 Args: y (np.array): [description] y_hat (np.array): [description] Returns: float: [description] \"\"\" mae = mean_absolute_error ( y , y_hat ) return mae / nanmean ( np . abs ( y )) polyfit ( ts , deg = 2 , loss_func = 're' ) \u00b6 \u5bf9\u7ed9\u5b9a\u7684\u65f6\u95f4\u5e8f\u5217\u8fdb\u884c\u76f4\u7ebf/\u4e8c\u6b21\u66f2\u7ebf\u62df\u5408\u3002 \u4e8c\u6b21\u66f2\u7ebf\u53ef\u4ee5\u62df\u5408\u5230\u53cd\u751f\u53cd\u8f6c\u7684\u884c\u60c5\uff0c\u5982\u5706\u5f27\u5e95\u3001\u5706\u5f27\u9876\uff1b\u4e5f\u53ef\u4ee5\u62df\u5408\u5230\u4e0a\u8ff0\u8d8b\u52bf\u4e2d\u7684\u5355\u8fb9\u8d70\u52bf\uff0c\u5373\u5176\u4e2d\u4e00\u6bb5\u66f2\u7ebf\u3002\u5bf9\u4e8e\u5982\u957f\u671f\u5747\u7ebf\uff0c\u5728\u4e00\u6bb5\u65f6\u95f4\u5185\u8d70\u52bf\u53ef\u80fd\u5448\u73b0\u4e3a\u4e00\u6761\u76f4\u7ebf\uff0c\u6545\u4e5f\u53ef\u7528\u6b64\u51fd\u6570\u8fdb\u884c\u76f4\u7ebf\u62df\u5408\u3002 \u4e3a\u4fbf\u4e8e\u5728\u4e0d\u540c\u54c1\u79cd\u3001\u4e0d\u540c\u7684\u65f6\u95f4\u4e4b\u95f4\u5bf9\u8bef\u5dee\u3001\u7cfb\u6570\u8fdb\u884c\u6bd4\u8f83\uff0c\u8bf7\u4e8b\u5148\u5bf9ts\u8fdb\u884c\u5f52\u4e00\u5316\u3002 \u5982\u679c\u9047\u5230\u65e0\u6cd5\u62df\u5408\u7684\u60c5\u51b5\uff08\u5f02\u5e38\uff09\uff0c\u5c06\u8fd4\u56de\u4e00\u4e2a\u975e\u5e38\u5927\u7684\u8bef\u5dee\uff0c\u5e76\u5c06\u5176\u5b83\u9879\u7f6e\u4e3anp.nan Examples: >>> ts = [ i for i in range ( 5 )] >>> err , ( a , b ) = polyfit ( ts , deg = 1 ) >>> print ( round ( err , 3 ), round ( a , 1 )) 0.0 1.0 Parameters: Name Type Description Default ts Sequence \u5f85\u62df\u5408\u7684\u65f6\u95f4\u5e8f\u5217 required deg int \u5982\u679c\u8981\u8fdb\u884c\u76f4\u7ebf\u62df\u5408\uff0c\u53d61\uff1b\u4e8c\u6b21\u66f2\u7ebf\u62df\u5408\u53d62. 
Defaults to 2 2 loss_func str \u8bef\u5dee\u8ba1\u7b97\u65b9\u6cd5\uff0c\u53d6\u503c\u4e3a mae , rmse , mse \u6216 re \u3002Defaults to re (relative_error) 're' Returns: Type Description [Tuple] \u5982\u679c\u4e3a\u76f4\u7ebf\u62df\u5408\uff0c\u8fd4\u56de\u8bef\u5dee\uff0c(a,b)(\u4e00\u6b21\u9879\u7cfb\u6570\u548c\u5e38\u6570)\u3002\u5982\u679c\u4e3a\u4e8c\u6b21\u66f2\u7ebf\u62df\u5408\uff0c\u8fd4\u56de \u8bef\u5dee, (a,b,c)(\u4e8c\u6b21\u9879\u3001\u4e00\u6b21\u9879\u548c\u5e38\u91cf\uff09, (vert_x, vert_y)(\u9876\u70b9\u5904\u7684index\uff0c\u9876\u70b9\u503c) Source code in omicron/talib/core.py def polyfit ( ts : Sequence , deg : int = 2 , loss_func = \"re\" ) -> Tuple : \"\"\"\u5bf9\u7ed9\u5b9a\u7684\u65f6\u95f4\u5e8f\u5217\u8fdb\u884c\u76f4\u7ebf/\u4e8c\u6b21\u66f2\u7ebf\u62df\u5408\u3002 \u4e8c\u6b21\u66f2\u7ebf\u53ef\u4ee5\u62df\u5408\u5230\u53cd\u751f\u53cd\u8f6c\u7684\u884c\u60c5\uff0c\u5982\u5706\u5f27\u5e95\u3001\u5706\u5f27\u9876\uff1b\u4e5f\u53ef\u4ee5\u62df\u5408\u5230\u4e0a\u8ff0\u8d8b\u52bf\u4e2d\u7684\u5355\u8fb9\u8d70\u52bf\uff0c\u5373\u5176\u4e2d\u4e00\u6bb5\u66f2\u7ebf\u3002\u5bf9\u4e8e\u5982\u957f\u671f\u5747\u7ebf\uff0c\u5728\u4e00\u6bb5\u65f6\u95f4\u5185\u8d70\u52bf\u53ef\u80fd\u5448\u73b0\u4e3a\u4e00\u6761\u76f4\u7ebf\uff0c\u6545\u4e5f\u53ef\u7528\u6b64\u51fd\u6570\u8fdb\u884c\u76f4\u7ebf\u62df\u5408\u3002 \u4e3a\u4fbf\u4e8e\u5728\u4e0d\u540c\u54c1\u79cd\u3001\u4e0d\u540c\u7684\u65f6\u95f4\u4e4b\u95f4\u5bf9\u8bef\u5dee\u3001\u7cfb\u6570\u8fdb\u884c\u6bd4\u8f83\uff0c\u8bf7\u4e8b\u5148\u5bf9ts\u8fdb\u884c\u5f52\u4e00\u5316\u3002 \u5982\u679c\u9047\u5230\u65e0\u6cd5\u62df\u5408\u7684\u60c5\u51b5\uff08\u5f02\u5e38\uff09\uff0c\u5c06\u8fd4\u56de\u4e00\u4e2a\u975e\u5e38\u5927\u7684\u8bef\u5dee\uff0c\u5e76\u5c06\u5176\u5b83\u9879\u7f6e\u4e3anp.nan Examples: >>> ts = [i for i in range(5)] >>> err, (a, b) = polyfit(ts, deg=1) >>> print(round(err, 3), round(a, 1)) 0.0 1.0 Args: ts (Sequence): \u5f85\u62df\u5408\u7684\u65f6\u95f4\u5e8f\u5217 deg (int): \u5982\u679c\u8981\u8fdb\u884c\u76f4\u7ebf\u62df\u5408\uff0c\u53d61\uff1b\u4e8c\u6b21\u66f2\u7ebf\u62df\u5408\u53d62. Defaults to 2 loss_func (str): \u8bef\u5dee\u8ba1\u7b97\u65b9\u6cd5\uff0c\u53d6\u503c\u4e3a`mae`, `rmse`,`mse` \u6216`re`\u3002Defaults to `re` (relative_error) Returns: [Tuple]: \u5982\u679c\u4e3a\u76f4\u7ebf\u62df\u5408\uff0c\u8fd4\u56de\u8bef\u5dee\uff0c(a,b)(\u4e00\u6b21\u9879\u7cfb\u6570\u548c\u5e38\u6570)\u3002\u5982\u679c\u4e3a\u4e8c\u6b21\u66f2\u7ebf\u62df\u5408\uff0c\u8fd4\u56de \u8bef\u5dee, (a,b,c)(\u4e8c\u6b21\u9879\u3001\u4e00\u6b21\u9879\u548c\u5e38\u91cf\uff09, (vert_x, vert_y)(\u9876\u70b9\u5904\u7684index\uff0c\u9876\u70b9\u503c) \"\"\" if deg not in ( 1 , 2 ): raise ValueError ( \"deg must be 1 or 2\" ) try : if any ( np . isnan ( ts )): raise ValueError ( \"ts contains nan\" ) x = np . array ( list ( range ( len ( ts )))) z = np . polyfit ( x , ts , deg = deg ) p = np . poly1d ( z ) ts_hat = np . array ([ p ( xi ) for xi in x ]) if loss_func == \"mse\" : error = np . mean ( np . square ( ts - ts_hat )) elif loss_func == \"rmse\" : error = np . sqrt ( np . mean ( np . 
square ( ts - ts_hat ))) elif loss_func == \"mae\" : error = mean_absolute_error ( ts , ts_hat ) else : # defaults to relative error error = pct_error ( ts , ts_hat ) if deg == 2 : a , b , c = z [ 0 ], z [ 1 ], z [ 2 ] axis_x = - b / ( 2 * a ) if a != 0 : axis_y = ( 4 * a * c - b * b ) / ( 4 * a ) else : axis_y = None return error , z , ( axis_x , axis_y ) elif deg == 1 : return error , z except Exception : error = 1e9 if deg == 1 : return error , ( np . nan , np . nan ) else : return error , ( np . nan , np . nan , np . nan ), ( np . nan , np . nan ) slope ( ts , loss_func = 're' ) \u00b6 \u6c42ts\u8868\u793a\u7684\u76f4\u7ebf\uff08\u5982\u679c\u80fd\u62df\u5408\u6210\u76f4\u7ebf\u7684\u8bdd\uff09\u7684\u659c\u7387 Parameters: Name Type Description Default ts np.array [description] required loss_func str [description]. Defaults to 're'. 're' Source code in omicron/talib/core.py def slope ( ts : np . array , loss_func = \"re\" ): \"\"\"\u6c42ts\u8868\u793a\u7684\u76f4\u7ebf\uff08\u5982\u679c\u80fd\u62df\u5408\u6210\u76f4\u7ebf\u7684\u8bdd\uff09\u7684\u659c\u7387 Args: ts (np.array): [description] loss_func (str, optional): [description]. Defaults to 're'. \"\"\" err , ( a , b ) = polyfit ( ts , deg = 1 , loss_func = loss_func ) return err , a smooth ( ts , win , poly_order = 1 , mode = 'interp' ) \u00b6 \u5e73\u6ed1\u5e8f\u5217ts\uff0c\u4f7f\u7528\u7a97\u53e3\u5927\u5c0f\u4e3awin\u7684\u5e73\u6ed1\u6a21\u578b\uff0c\u9ed8\u8ba4\u4f7f\u7528\u7ebf\u6027\u6a21\u578b \u63d0\u4f9b\u672c\u51fd\u6570\u4e3b\u8981\u57fa\u4e8e\u8fd9\u6837\u7684\u8003\u8651\uff1a omicron\u7684\u4f7f\u7528\u8005\u53ef\u80fd\u5e76\u4e0d\u719f\u6089\u4fe1\u53f7\u5904\u7406\u7684\u6982\u5ff5\uff0c\u8fd9\u91cc\u76f8\u5f53\u4e8e\u63d0\u4f9b\u4e86\u76f8\u5173\u529f\u80fd\u7684\u4e00\u4e2a\u5165\u53e3\u3002 Parameters: Name Type Description Default ts np.array [description] required win int [description] required poly_order int [description]. Defaults to 1. 1 Source code in omicron/talib/core.py def smooth ( ts : np . array , win : int , poly_order = 1 , mode = \"interp\" ): \"\"\"\u5e73\u6ed1\u5e8f\u5217ts\uff0c\u4f7f\u7528\u7a97\u53e3\u5927\u5c0f\u4e3awin\u7684\u5e73\u6ed1\u6a21\u578b\uff0c\u9ed8\u8ba4\u4f7f\u7528\u7ebf\u6027\u6a21\u578b \u63d0\u4f9b\u672c\u51fd\u6570\u4e3b\u8981\u57fa\u4e8e\u8fd9\u6837\u7684\u8003\u8651\uff1a omicron\u7684\u4f7f\u7528\u8005\u53ef\u80fd\u5e76\u4e0d\u719f\u6089\u4fe1\u53f7\u5904\u7406\u7684\u6982\u5ff5\uff0c\u8fd9\u91cc\u76f8\u5f53\u4e8e\u63d0\u4f9b\u4e86\u76f8\u5173\u529f\u80fd\u7684\u4e00\u4e2a\u5165\u53e3\u3002 Args: ts (np.array): [description] win (int): [description] poly_order (int, optional): [description]. Defaults to 1. \"\"\" return savgol_filter ( ts , win , poly_order , mode = mode ) weighted_moving_average ( ts , win ) \u00b6 \u8ba1\u7b97\u52a0\u6743\u79fb\u52a8\u5e73\u5747 Parameters: Name Type Description Default ts np.array [description] required win int [description] required Returns: Type Description np.array [description] Source code in omicron/talib/core.py def weighted_moving_average ( ts : np . array , win : int ) -> np . array : \"\"\"\u8ba1\u7b97\u52a0\u6743\u79fb\u52a8\u5e73\u5747 Args: ts (np.array): [description] win (int): [description] Returns: np.array: [description] \"\"\" w = [ 2 * ( i + 1 ) / ( win * ( win + 1 )) for i in range ( win )] return np . convolve ( ts , w , \"valid\" ) morph \u00b6 \u5f62\u6001\u68c0\u6d4b\u76f8\u5173\u65b9\u6cd5 BreakoutFlag ( IntEnum ) \u00b6 An enumeration. 
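The `polyfit` doctest above only covers `deg=1`; the sketch below exercises the quadratic branch, which additionally returns the parabola's vertex. The import path is assumed from the `omicron/talib/core.py` source location:

```python
import numpy as np

from omicron.talib.core import polyfit  # assumed import path

# A noise-free parabola y = (x - 5)^2 with its vertex at (5, 0).
ts = np.array([(x - 5.0) ** 2 for x in range(11)])

err, (a, b, c), (vert_x, vert_y) = polyfit(ts, deg=2)
print(round(err, 6))                                    # close to 0 for an exact fit
print(round(a, 3), round(vert_x, 3), round(vert_y, 3))  # roughly 1.0 5.0 0.0
```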
Source code in omicron/talib/morph.py class BreakoutFlag ( IntEnum ): UP = 1 DOWN = - 1 NONE = 0 CrossFlag ( IntEnum ) \u00b6 An enumeration. Source code in omicron/talib/morph.py class CrossFlag ( IntEnum ): UPCROSS = 1 DOWNCROSS = - 1 NONE = 0 breakout ( ts , upthres = 0.01 , downthres =- 0.01 , confirm = 1 ) \u00b6 \u68c0\u6d4b\u65f6\u95f4\u5e8f\u5217\u662f\u5426\u7a81\u7834\u4e86\u538b\u529b\u7ebf\uff08\u6574\u7406\u7ebf\uff09 Parameters: Name Type Description Default ts np.ndarray \u65f6\u95f4\u5e8f\u5217 required upthres float \u8bf7\u53c2\u8003 peaks_and_valleys 0.01 downthres float \u8bf7\u53c2\u8003 peaks_and_valleys -0.01 confirm int \u7ecf\u8fc7\u591a\u5c11\u4e2abars\u540e\uff0c\u624d\u786e\u8ba4\u7a81\u7834\u3002\u9ed8\u8ba4\u4e3a1 1 Returns: Type Description BreakoutFlag \u5982\u679c\u4e0a\u5411\u7a81\u7834\u538b\u529b\u7ebf\uff0c\u8fd4\u56de1\uff0c\u5982\u679c\u5411\u4e0b\u7a81\u7834\u538b\u529b\u7ebf\uff0c\u8fd4\u56de-1\uff0c\u5426\u5219\u8fd4\u56de0 Source code in omicron/talib/morph.py def breakout ( ts : np . ndarray , upthres : float = 0.01 , downthres : float = - 0.01 , confirm : int = 1 ) -> BreakoutFlag : \"\"\"\u68c0\u6d4b\u65f6\u95f4\u5e8f\u5217\u662f\u5426\u7a81\u7834\u4e86\u538b\u529b\u7ebf\uff08\u6574\u7406\u7ebf\uff09 Args: ts (np.ndarray): \u65f6\u95f4\u5e8f\u5217 upthres (float, optional): \u8bf7\u53c2\u8003[peaks_and_valleys][omicron.talib.morph.peaks_and_valleys] downthres (float, optional): \u8bf7\u53c2\u8003[peaks_and_valleys][omicron.talib.morph.peaks_and_valleys] confirm (int, optional): \u7ecf\u8fc7\u591a\u5c11\u4e2abars\u540e\uff0c\u624d\u786e\u8ba4\u7a81\u7834\u3002\u9ed8\u8ba4\u4e3a1 Returns: \u5982\u679c\u4e0a\u5411\u7a81\u7834\u538b\u529b\u7ebf\uff0c\u8fd4\u56de1\uff0c\u5982\u679c\u5411\u4e0b\u7a81\u7834\u538b\u529b\u7ebf\uff0c\u8fd4\u56de-1\uff0c\u5426\u5219\u8fd4\u56de0 \"\"\" support , resist , _ = support_resist_lines ( ts [: - confirm ], upthres , downthres ) x0 = len ( ts ) - confirm - 1 x = list ( range ( len ( ts ) - confirm , len ( ts ))) if resist is not None : if np . all ( ts [ x ] > resist ( x )) and ts [ x0 ] <= resist ( x0 ): return BreakoutFlag . UP if support is not None : if np . all ( ts [ x ] < support ( x )) and ts [ x0 ] >= support ( x0 ): return BreakoutFlag . DOWN return BreakoutFlag . NONE cross ( f , g ) \u00b6 \u5224\u65ad\u5e8f\u5217f\u662f\u5426\u4e0eg\u76f8\u4ea4\u3002\u5982\u679c\u4e24\u4e2a\u5e8f\u5217\u6709\u4e14\u4ec5\u6709\u4e00\u4e2a\u4ea4\u70b9\uff0c\u5219\u8fd4\u56de1\u8868\u660ef\u4e0a\u4ea4g\uff1b-1\u8868\u660ef\u4e0b\u4ea4g \u672c\u65b9\u6cd5\u53ef\u7528\u4ee5\u5224\u65ad\u4e24\u6761\u5747\u7ebf\u662f\u5426\u76f8\u4ea4\u3002 Returns: Type Description CrossFlag (flag, index), \u5176\u4e2dflag\u53d6\u503c\u4e3a\uff1a 0 \u65e0\u6548 -1 f\u5411\u4e0b\u4ea4\u53c9g 1 f\u5411\u4e0a\u4ea4\u53c9g Source code in omicron/talib/morph.py def cross ( f : np . ndarray , g : np . ndarray ) -> CrossFlag : \"\"\"\u5224\u65ad\u5e8f\u5217f\u662f\u5426\u4e0eg\u76f8\u4ea4\u3002\u5982\u679c\u4e24\u4e2a\u5e8f\u5217\u6709\u4e14\u4ec5\u6709\u4e00\u4e2a\u4ea4\u70b9\uff0c\u5219\u8fd4\u56de1\u8868\u660ef\u4e0a\u4ea4g\uff1b-1\u8868\u660ef\u4e0b\u4ea4g \u672c\u65b9\u6cd5\u53ef\u7528\u4ee5\u5224\u65ad\u4e24\u6761\u5747\u7ebf\u662f\u5426\u76f8\u4ea4\u3002 returns: (flag, index), \u5176\u4e2dflag\u53d6\u503c\u4e3a\uff1a 0 \u65e0\u6548 -1 f\u5411\u4e0b\u4ea4\u53c9g 1 f\u5411\u4e0a\u4ea4\u53c9g \"\"\" indices = np . argwhere ( np . diff ( np . sign ( f - g ))) . flatten () if len ( indices ) == 0 : return CrossFlag . 
NONE , 0 # \u5982\u679c\u5b58\u5728\u4e00\u4e2a\u6216\u8005\u591a\u4e2a\u4ea4\u70b9\uff0c\u53d6\u6700\u540e\u4e00\u4e2a idx = indices [ - 1 ] if f [ idx ] < g [ idx ]: return CrossFlag . UPCROSS , idx elif f [ idx ] > g [ idx ]: return CrossFlag . DOWNCROSS , idx else : return CrossFlag ( np . sign ( g [ idx - 1 ] - f [ idx - 1 ])), idx energy_hump ( bars , thresh = 2 ) \u00b6 \u68c0\u6d4b bars \u4e2d\u662f\u5426\u5b58\u5728\u4e24\u6ce2\u4ee5\u4e0a\u91cf\u80fd\u5267\u70c8\u589e\u52a0\u7684\u60c5\u5f62\uff08\u80fd\u91cf\u9a7c\u5cf0\uff09\uff0c\u8fd4\u56de\u6700\u540e\u4e00\u6ce2\u8ddd\u73b0\u5728\u7684\u4f4d\u7f6e\u53ca\u533a\u95f4\u957f\u5ea6\u3002 \u6ce8\u610f\u5982\u679c\u6700\u540e\u4e00\u4e2a\u80fd\u91cf\u9a7c\u5cf0\u8ddd\u73b0\u5728\u8fc7\u8fdc\uff08\u6bd4\u5982\u8d85\u8fc710\u4e2abar),\u53ef\u80fd\u610f\u5473\u7740\u8d44\u91d1\u5df2\u7ecf\u9003\u79bb\uff0c\u80fd\u91cf\u5df2\u7ecf\u8017\u5c3d\u3002 Parameters: Name Type Description Default bars [('frame', ' Optional [ Tuple [ int , int ]]: \"\"\"\u68c0\u6d4b`bars`\u4e2d\u662f\u5426\u5b58\u5728\u4e24\u6ce2\u4ee5\u4e0a\u91cf\u80fd\u5267\u70c8\u589e\u52a0\u7684\u60c5\u5f62\uff08\u80fd\u91cf\u9a7c\u5cf0\uff09\uff0c\u8fd4\u56de\u6700\u540e\u4e00\u6ce2\u8ddd\u73b0\u5728\u7684\u4f4d\u7f6e\u53ca\u533a\u95f4\u957f\u5ea6\u3002 \u6ce8\u610f\u5982\u679c\u6700\u540e\u4e00\u4e2a\u80fd\u91cf\u9a7c\u5cf0\u8ddd\u73b0\u5728\u8fc7\u8fdc\uff08\u6bd4\u5982\u8d85\u8fc710\u4e2abar),\u53ef\u80fd\u610f\u5473\u7740\u8d44\u91d1\u5df2\u7ecf\u9003\u79bb\uff0c\u80fd\u91cf\u5df2\u7ecf\u8017\u5c3d\u3002 Args: bars: \u884c\u60c5\u6570\u636e thresh: \u6700\u540e\u4e00\u6ce2\u91cf\u5fc5\u987b\u5927\u4e8e20\u5929\u5747\u91cf\u7684\u500d\u6570\u3002 Returns: \u5982\u679c\u4e0d\u5b58\u5728\u80fd\u91cf\u9a7c\u5cf0\u7684\u60c5\u5f62\uff0c\u5219\u8fd4\u56deNone\uff0c\u5426\u5219\u8fd4\u56de\u6700\u540e\u4e00\u4e2a\u9a7c\u5cf0\u79bb\u73b0\u5728\u7684\u8ddd\u79bb\u53ca\u533a\u95f4\u957f\u5ea6\u3002 \"\"\" vol = bars [ \"volume\" ] std = np . std ( vol [ 1 :] / vol [: - 1 ]) pvs = peak_valley_pivots ( vol , std , 0 ) frames = bars [ \"frame\" ] pvs [ 0 ] = 0 pvs [ - 1 ] = - 1 peaks = np . argwhere ( pvs == 1 ) mn = np . mean ( vol [ peaks ]) # \u9876\u70b9\u4e0d\u80fd\u7f29\u91cf\u5230\u5c16\u5cf0\u5747\u503c\u4ee5\u4e0b real_peaks = np . intersect1d ( np . argwhere ( vol > mn ), peaks ) if len ( real_peaks ) < 2 : return None logger . debug ( \"found %s peaks at %s \" , len ( real_peaks ), frames [ real_peaks ]) lp = real_peaks [ - 1 ] ma = moving_average ( vol , 20 )[ lp ] if vol [ lp ] < ma * thresh : logger . debug ( \"vol of last peak[ %s ] is less than mean_vol(20) * thresh[ %s ]\" , vol [ lp ], ma * thresh , ) return None return len ( bars ) - real_peaks [ - 1 ], real_peaks [ - 1 ] - real_peaks [ 0 ] inverse_vcross ( f , g ) \u00b6 \u5224\u65ad\u5e8f\u5217f\u662f\u5426\u4e0e\u5e8f\u5217g\u5b58\u5728^\u578b\u76f8\u4ea4\u3002\u5373\u5b58\u5728\u4e24\u4e2a\u4ea4\u70b9\uff0c\u7b2c\u4e00\u4e2a\u4ea4\u70b9\u4e3a\u5411\u4e0a\u76f8\u4ea4\uff0c\u7b2c\u4e8c\u4e2a\u4ea4\u70b9\u4e3a\u5411\u4e0b \u76f8\u4ea4\u3002\u53ef\u7528\u4e8e\u5224\u65ad\u89c1\u9876\u7279\u5f81\u7b49\u573a\u5408\u3002 Parameters: Name Type Description Default f np.array [description] required g np.array [description] required Returns: Type Description Tuple [description] Source code in omicron/talib/morph.py def inverse_vcross ( f : np . array , g : np . 
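`cross` above returns a `(CrossFlag, index)` pair describing the last crossing of two equal-length sequences, e.g. two moving averages. A hedged sketch on synthetic data (import path assumed from `omicron/talib/morph.py`):

```python
import numpy as np

from omicron.talib.morph import CrossFlag, cross  # assumed import path

# A fast series crossing above a flat slow series between index 1 and 2.
f = np.array([1.0, 2.0, 4.0, 5.0])  # e.g. a short moving average
g = np.array([3.0, 3.0, 3.0, 3.0])  # e.g. a long moving average

flag, idx = cross(f, g)
print(flag == CrossFlag.UPCROSS, idx)  # True 1
```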
array ) -> Tuple : \"\"\"\u5224\u65ad\u5e8f\u5217f\u662f\u5426\u4e0e\u5e8f\u5217g\u5b58\u5728^\u578b\u76f8\u4ea4\u3002\u5373\u5b58\u5728\u4e24\u4e2a\u4ea4\u70b9\uff0c\u7b2c\u4e00\u4e2a\u4ea4\u70b9\u4e3a\u5411\u4e0a\u76f8\u4ea4\uff0c\u7b2c\u4e8c\u4e2a\u4ea4\u70b9\u4e3a\u5411\u4e0b \u76f8\u4ea4\u3002\u53ef\u7528\u4e8e\u5224\u65ad\u89c1\u9876\u7279\u5f81\u7b49\u573a\u5408\u3002 Args: f (np.array): [description] g (np.array): [description] Returns: Tuple: [description] \"\"\" indices = np . argwhere ( np . diff ( np . sign ( f - g ))) . flatten () if len ( indices ) == 2 : idx0 , idx1 = indices if f [ idx0 ] < g [ idx0 ] and f [ idx1 ] > g [ idx1 ]: return True , ( idx0 , idx1 ) return False , ( None , None ) peaks_and_valleys ( ts , up_thresh = None , down_thresh = None ) \u00b6 \u5bfb\u627ets\u4e2d\u7684\u6ce2\u5cf0\u548c\u6ce2\u8c37\uff0c\u8fd4\u56de\u6570\u7ec4\u6307\u793a\u5728\u8be5\u4f4d\u7f6e\u4e0a\u662f\u5426\u4e3a\u6ce2\u5cf0\u6216\u6ce2\u8c37\u3002\u5982\u679c\u4e3a1\uff0c\u5219\u4e3a\u6ce2\u5cf0\uff1b\u5982\u679c\u4e3a-1\uff0c\u5219\u4e3a\u6ce2\u8c37\u3002 \u672c\u51fd\u6570\u76f4\u63a5\u4f7f\u7528\u4e86zigzag\u4e2d\u7684peak_valley_pivots. \u6709\u5f88\u591a\u65b9\u6cd5\u53ef\u4ee5\u5b9e\u73b0\u672c\u529f\u80fd\uff0c\u6bd4\u5982scipy.signals.find_peaks_cwt, peak_valley_pivots\u7b49\u3002\u672c\u51fd\u6570\u66f4\u9002\u5408\u91d1\u878d\u65f6\u95f4\u5e8f\u5217\uff0c\u5e76\u4e14\u4f7f\u7528\u4e86cython\u52a0\u901f\u3002 Parameters: Name Type Description Default ts np.ndarray \u65f6\u95f4\u5e8f\u5217 required up_thresh float \u6ce2\u5cf0\u7684\u9608\u503c\uff0c\u5982\u679c\u4e3aNone,\u5219\u4f7f\u7528ts\u53d8\u5316\u7387\u7684\u4e8c\u500d\u6807\u51c6\u5dee None down_thresh float \u6ce2\u8c37\u7684\u9608\u503c\uff0c\u5982\u679c\u4e3aNone,\u5219\u4f7f\u7528ts\u53d8\u5316\u7387\u7684\u4e8c\u500d\u6807\u51c6\u5dee\u4e58\u4ee5-1 None Returns: Type Description np.ndarray \u8fd4\u56de\u6570\u7ec4\u6307\u793a\u5728\u8be5\u4f4d\u7f6e\u4e0a\u662f\u5426\u4e3a\u6ce2\u5cf0\u6216\u6ce2\u8c37\u3002 Source code in omicron/talib/morph.py def peaks_and_valleys ( ts : np . ndarray , up_thresh : Optional [ float ] = None , down_thresh : Optional [ float ] = None , ) -> np . ndarray : \"\"\"\u5bfb\u627ets\u4e2d\u7684\u6ce2\u5cf0\u548c\u6ce2\u8c37\uff0c\u8fd4\u56de\u6570\u7ec4\u6307\u793a\u5728\u8be5\u4f4d\u7f6e\u4e0a\u662f\u5426\u4e3a\u6ce2\u5cf0\u6216\u6ce2\u8c37\u3002\u5982\u679c\u4e3a1\uff0c\u5219\u4e3a\u6ce2\u5cf0\uff1b\u5982\u679c\u4e3a-1\uff0c\u5219\u4e3a\u6ce2\u8c37\u3002 \u672c\u51fd\u6570\u76f4\u63a5\u4f7f\u7528\u4e86zigzag\u4e2d\u7684peak_valley_pivots. \u6709\u5f88\u591a\u65b9\u6cd5\u53ef\u4ee5\u5b9e\u73b0\u672c\u529f\u80fd\uff0c\u6bd4\u5982scipy.signals.find_peaks_cwt, peak_valley_pivots\u7b49\u3002\u672c\u51fd\u6570\u66f4\u9002\u5408\u91d1\u878d\u65f6\u95f4\u5e8f\u5217\uff0c\u5e76\u4e14\u4f7f\u7528\u4e86cython\u52a0\u901f\u3002 Args: ts (np.ndarray): \u65f6\u95f4\u5e8f\u5217 up_thresh (float): \u6ce2\u5cf0\u7684\u9608\u503c\uff0c\u5982\u679c\u4e3aNone,\u5219\u4f7f\u7528ts\u53d8\u5316\u7387\u7684\u4e8c\u500d\u6807\u51c6\u5dee down_thresh (float): \u6ce2\u8c37\u7684\u9608\u503c\uff0c\u5982\u679c\u4e3aNone,\u5219\u4f7f\u7528ts\u53d8\u5316\u7387\u7684\u4e8c\u500d\u6807\u51c6\u5dee\u4e58\u4ee5-1 Returns: np.ndarray: \u8fd4\u56de\u6570\u7ec4\u6307\u793a\u5728\u8be5\u4f4d\u7f6e\u4e0a\u662f\u5426\u4e3a\u6ce2\u5cf0\u6216\u6ce2\u8c37\u3002 \"\"\" if ts . dtype != np . float64 : ts = ts . astype ( np . 
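`inverse_vcross` above looks for a ^-shaped double crossing (up, then down), which the docstring relates to topping patterns. A hedged sketch with a synthetic hump (import path assumed):

```python
import numpy as np

from omicron.talib.morph import inverse_vcross  # assumed import path

# f rises above g and then falls back below it: exactly two crossings.
f = np.array([1.0, 4.0, 4.0, 1.0])
g = np.array([2.0, 2.0, 2.0, 2.0])

found, (i0, i1) = inverse_vcross(f, g)
print(found, i0, i1)  # True 0 2
```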
float64 ) if any ([ up_thresh is None , down_thresh is None ]): change_rate = ts [ 1 :] / ts [: - 1 ] - 1 std = np . std ( change_rate ) up_thresh = up_thresh or 2 * std down_thresh = down_thresh or - 2 * std return peak_valley_pivots ( ts , up_thresh , down_thresh ) plateaus ( numbers , min_size , fall_in_range_ratio = 0.97 ) \u00b6 \u7edf\u8ba1\u6570\u7ec4 numbers \u4e2d\u7684\u53ef\u80fd\u5b58\u5728\u7684\u5e73\u53f0\u6574\u7406\u3002 \u5982\u679c\u4e00\u4e2a\u6570\u7ec4\u4e2d\u5b58\u5728\u7740\u5b50\u6570\u7ec4\uff0c\u4f7f\u5f97\u5176\u5143\u7d20\u4e0e\u5747\u503c\u7684\u8ddd\u79bb\u843d\u5728\u4e09\u4e2a\u6807\u51c6\u5dee\u4ee5\u5185\u7684\u6bd4\u4f8b\u8d85\u8fc7 fall_in_range_ratio \u7684\uff0c\u5219\u8ba4\u4e3a\u8be5\u5b50\u6570\u7ec4\u6ee1\u8db3\u5e73\u53f0\u6574\u7406\u3002 Parameters: Name Type Description Default numbers ndarray \u8f93\u5165\u6570\u7ec4 required min_size int \u5e73\u53f0\u7684\u6700\u5c0f\u957f\u5ea6 required fall_in_range_ratio float \u8d85\u8fc7 fall_in_range_ratio \u6bd4\u4f8b\u7684\u5143\u7d20\u843d\u5728\u5747\u503c\u7684\u4e09\u4e2a\u6807\u51c6\u5dee\u4ee5\u5185\uff0c\u5c31\u8ba4\u4e3a\u8be5\u5b50\u6570\u7ec4\u6784\u6210\u4e00\u4e2a\u5e73\u53f0 0.97 Returns: Type Description List[Tuple] \u5e73\u53f0\u7684\u8d77\u59cb\u4f4d\u7f6e\u548c\u957f\u5ea6\u7684\u6570\u7ec4 Source code in omicron/talib/morph.py def plateaus ( numbers : np . ndarray , min_size : int , fall_in_range_ratio : float = 0.97 ) -> List [ Tuple ]: \"\"\"\u7edf\u8ba1\u6570\u7ec4`numbers`\u4e2d\u7684\u53ef\u80fd\u5b58\u5728\u7684\u5e73\u53f0\u6574\u7406\u3002 \u5982\u679c\u4e00\u4e2a\u6570\u7ec4\u4e2d\u5b58\u5728\u7740\u5b50\u6570\u7ec4\uff0c\u4f7f\u5f97\u5176\u5143\u7d20\u4e0e\u5747\u503c\u7684\u8ddd\u79bb\u843d\u5728\u4e09\u4e2a\u6807\u51c6\u5dee\u4ee5\u5185\u7684\u6bd4\u4f8b\u8d85\u8fc7`fall_in_range_ratio`\u7684\uff0c\u5219\u8ba4\u4e3a\u8be5\u5b50\u6570\u7ec4\u6ee1\u8db3\u5e73\u53f0\u6574\u7406\u3002 Args: numbers: \u8f93\u5165\u6570\u7ec4 min_size: \u5e73\u53f0\u7684\u6700\u5c0f\u957f\u5ea6 fall_in_range_ratio: \u8d85\u8fc7`fall_in_range_ratio`\u6bd4\u4f8b\u7684\u5143\u7d20\u843d\u5728\u5747\u503c\u7684\u4e09\u4e2a\u6807\u51c6\u5dee\u4ee5\u5185\uff0c\u5c31\u8ba4\u4e3a\u8be5\u5b50\u6570\u7ec4\u6784\u6210\u4e00\u4e2a\u5e73\u53f0 Returns: \u5e73\u53f0\u7684\u8d77\u59cb\u4f4d\u7f6e\u548c\u957f\u5ea6\u7684\u6570\u7ec4 \"\"\" if numbers . size <= min_size : n = 1 else : n = numbers . size // min_size clusters = clustering ( numbers , n ) plats = [] for ( start , length ) in clusters : if length < min_size : continue y = numbers [ start : start + length ] mean = np . mean ( y ) std = np . std ( y ) inrange = len ( y [ np . abs ( y - mean ) < 3 * std ]) ratio = inrange / length if ratio >= fall_in_range_ratio : plats . 
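`peaks_and_valleys` above wraps zigzag's `peak_valley_pivots`; when the thresholds are omitted it derives them from twice the standard deviation of the change rate. A hedged sketch with explicit thresholds so that the roughly 20% swings in the toy series register as pivots (import path assumed):

```python
import numpy as np

from omicron.talib.morph import peaks_and_valleys  # assumed import path

# A small zig-zag series: rallies into a peak, drops into a valley, rallies again.
close = np.array([10.0, 11.0, 12.5, 11.0, 9.8, 10.5, 12.0], dtype=np.float64)

flags = peaks_and_valleys(close, up_thresh=0.1, down_thresh=-0.1)
print(flags)  # 1 marks a peak, -1 a valley, 0 elsewhere
```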
append (( start , length )) return plats rsi_bottom_distance ( close , thresh = None ) \u00b6 \u6839\u636e\u7ed9\u5b9a\u7684\u6536\u76d8\u4ef7\uff0c\u8ba1\u7b97\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5230\u4e0a\u4e00\u4e2a\u53d1\u51farsi\u4f4e\u6c34\u5e73\u7684\u8ddd\u79bb\uff0c \u5982\u679c\u4ece\u4e0a\u4e00\u4e2a\u6700\u4f4e\u70b9rsi\u5230\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5e76\u672a\u53d1\u51fa\u4f4e\u6c34\u5e73\u4fe1\u53f7\uff0c \u8fd4\u56de\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5230\u4e0a\u4e00\u4e2a\u53d1\u51fa\u6700\u4f4e\u70b9rsi\u7684\u8ddd\u79bb\u3002 \u5176\u4e2dclose\u7684\u957f\u5ea6\u4e00\u822c\u4e0d\u5c0f\u4e8e60\u3002 \u8fd4\u56de\u503c\u4e3a\u8ddd\u79bb\u6574\u6570\uff0c\u4e0d\u6ee1\u8db3\u6761\u4ef6\u5219\u8fd4\u56deNone\u3002 Parameters: Name Type Description Default close np.array \u5177\u6709\u65f6\u95f4\u5e8f\u5217\u7684\u6536\u76d8\u4ef7 required thresh Tuple[float, float]) None\u9002\u7528\u6240\u6709\u80a1\u7968\uff0c\u4e0d\u5fc5\u66f4\u6539\uff0c\u4e5f\u53ef\u81ea\u884c\u8bbe\u7f6e\u3002 None Returns: Type Description int \u8fd4\u56de\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5230\u4e0a\u4e00\u4e2a\u53d1\u51farsi\u4f4e\u6c34\u5e73\u7684\u8ddd\u79bb\u3002 \u5982\u679c\u4ece\u4e0a\u4e00\u4e2a\u6700\u4f4e\u70b9rsi\u5230\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5e76\u672a\u53d1\u51fa\u4f4e\u6c34\u5e73\u4fe1\u53f7\uff0c \u8fd4\u56de\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5230\u4e0a\u4e00\u4e2a\u53d1\u51fa\u6700\u4f4e\u70b9rsi\u7684\u8ddd\u79bb\u3002 \u9664\u6b64\u4e4b\u5916\uff0c\u8fd4\u56deNone\u3002 Source code in omicron/talib/morph.py def rsi_bottom_distance ( close : np . array , thresh : Tuple [ float , float ] = None ) -> int : \"\"\"\u6839\u636e\u7ed9\u5b9a\u7684\u6536\u76d8\u4ef7\uff0c\u8ba1\u7b97\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5230\u4e0a\u4e00\u4e2a\u53d1\u51farsi\u4f4e\u6c34\u5e73\u7684\u8ddd\u79bb\uff0c \u5982\u679c\u4ece\u4e0a\u4e00\u4e2a\u6700\u4f4e\u70b9rsi\u5230\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5e76\u672a\u53d1\u51fa\u4f4e\u6c34\u5e73\u4fe1\u53f7\uff0c \u8fd4\u56de\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5230\u4e0a\u4e00\u4e2a\u53d1\u51fa\u6700\u4f4e\u70b9rsi\u7684\u8ddd\u79bb\u3002 \u5176\u4e2dclose\u7684\u957f\u5ea6\u4e00\u822c\u4e0d\u5c0f\u4e8e60\u3002 \u8fd4\u56de\u503c\u4e3a\u8ddd\u79bb\u6574\u6570\uff0c\u4e0d\u6ee1\u8db3\u6761\u4ef6\u5219\u8fd4\u56deNone\u3002 Args: close (np.array): \u5177\u6709\u65f6\u95f4\u5e8f\u5217\u7684\u6536\u76d8\u4ef7 thresh (Tuple[float, float]) : None\u9002\u7528\u6240\u6709\u80a1\u7968\uff0c\u4e0d\u5fc5\u66f4\u6539\uff0c\u4e5f\u53ef\u81ea\u884c\u8bbe\u7f6e\u3002 Returns: \u8fd4\u56de\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5230\u4e0a\u4e00\u4e2a\u53d1\u51farsi\u4f4e\u6c34\u5e73\u7684\u8ddd\u79bb\u3002 \u5982\u679c\u4ece\u4e0a\u4e00\u4e2a\u6700\u4f4e\u70b9rsi\u5230\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5e76\u672a\u53d1\u51fa\u4f4e\u6c34\u5e73\u4fe1\u53f7\uff0c \u8fd4\u56de\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5230\u4e0a\u4e00\u4e2a\u53d1\u51fa\u6700\u4f4e\u70b9rsi\u7684\u8ddd\u79bb\u3002 \u9664\u6b64\u4e4b\u5916\uff0c\u8fd4\u56deNone\u3002\"\"\" assert len ( close ) >= 60 , \"must provide an array with at least 60 length!\" if close . dtype != np . float64 : close = close . astype ( np . float64 ) if thresh is None : std = np . std ( close [ - 59 :] / close [ - 60 : - 1 ] - 1 ) thresh = ( 2 * std , - 2 * std ) rsi = ta . 
RSI ( close , 6 ) watermarks = rsi_watermarks ( close , thresh ) if watermarks is not None : low_watermark , _ , _ = watermarks pivots = peak_valley_pivots ( close , thresh [ 0 ], thresh [ 1 ]) pivots [ 0 ], pivots [ - 1 ] = 0 , 0 # \u8c37\u503cRSI<30 valley_rsi_index = np . where (( rsi < 30 ) & ( pivots == - 1 ))[ 0 ] # RSI\u4f4e\u6c34\u5e73\u7684\u6700\u5927\u503c\uff1a\u4f4e\u6c34\u5e73*1.01 low_rsi_index = np . where ( rsi <= low_watermark * 1.01 )[ 0 ] if len ( valley_rsi_index ) > 0 : distance = len ( rsi ) - 1 - valley_rsi_index [ - 1 ] if len ( low_rsi_index ) > 0 : if low_rsi_index [ - 1 ] >= valley_rsi_index [ - 1 ]: distance = len ( rsi ) - 1 - low_rsi_index [ - 1 ] return distance rsi_bottom_divergent ( close , thresh = None , rsi_limit = 30 ) \u00b6 \u5bfb\u627e\u6700\u8fd1\u6ee1\u8db3\u6761\u4ef6\u7684rsi\u5e95\u80cc\u79bb\u3002 \u8fd4\u56de\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5230\u6700\u8fd1\u5e95\u80cc\u79bb\u53d1\u751f\u70b9\u7684\u8ddd\u79bb\uff1b\u6ca1\u6709\u6ee1\u8db3\u6761\u4ef6\u7684\u5e95\u80cc\u79bb\uff0c\u8fd4\u56deNone\u3002 Parameters: Name Type Description Default close np.array \u65f6\u95f4\u5e8f\u5217\u6536\u76d8\u4ef7 required thresh Tuple[float, float] \u8bf7\u53c2\u8003 peaks_and_valleys None rsi_limit float RSI\u53d1\u751f\u5e95\u80cc\u79bb\u65f6\u7684\u9608\u503c, \u9ed8\u8ba4\u503c30\uff0820\u6548\u679c\u66f4\u4f73\uff0c\u4f46\u662f\u68c0\u6d4b\u51fa\u6765\u6570\u91cf\u592a\u5c11\uff09\uff0c\u5373\u53ea\u8fc7\u6ee4RSI6<30\u7684\u5c40\u90e8\u6700\u4f4e\u6536\u76d8\u4ef7\u3002 30 Returns: Type Description int \u8fd4\u56deint\u7c7b\u578b\u7684\u6574\u6570\uff0c\u8868\u793a\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5230\u6700\u8fd1\u5e95\u80cc\u79bb\u53d1\u751f\u70b9\u7684\u8ddd\u79bb\uff1b\u6ca1\u6709\u6ee1\u8db3\u6761\u4ef6\u7684\u5e95\u80cc\u79bb\uff0c\u8fd4\u56deNone\u3002 Source code in omicron/talib/morph.py def rsi_bottom_divergent ( close : np . array , thresh : Tuple [ float , float ] = None , rsi_limit : float = 30 ) -> int : \"\"\"\u5bfb\u627e\u6700\u8fd1\u6ee1\u8db3\u6761\u4ef6\u7684rsi\u5e95\u80cc\u79bb\u3002 \u8fd4\u56de\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5230\u6700\u8fd1\u5e95\u80cc\u79bb\u53d1\u751f\u70b9\u7684\u8ddd\u79bb\uff1b\u6ca1\u6709\u6ee1\u8db3\u6761\u4ef6\u7684\u5e95\u80cc\u79bb\uff0c\u8fd4\u56deNone\u3002 Args: close (np.array): \u65f6\u95f4\u5e8f\u5217\u6536\u76d8\u4ef7 thresh (Tuple[float, float]): \u8bf7\u53c2\u8003[peaks_and_valleys][omicron.talib.morph.peaks_and_valleys] rsi_limit (float, optional): RSI\u53d1\u751f\u5e95\u80cc\u79bb\u65f6\u7684\u9608\u503c, \u9ed8\u8ba4\u503c30\uff0820\u6548\u679c\u66f4\u4f73\uff0c\u4f46\u662f\u68c0\u6d4b\u51fa\u6765\u6570\u91cf\u592a\u5c11\uff09\uff0c\u5373\u53ea\u8fc7\u6ee4RSI6<30\u7684\u5c40\u90e8\u6700\u4f4e\u6536\u76d8\u4ef7\u3002 Returns: \u8fd4\u56deint\u7c7b\u578b\u7684\u6574\u6570\uff0c\u8868\u793a\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5230\u6700\u8fd1\u5e95\u80cc\u79bb\u53d1\u751f\u70b9\u7684\u8ddd\u79bb\uff1b\u6ca1\u6709\u6ee1\u8db3\u6761\u4ef6\u7684\u5e95\u80cc\u79bb\uff0c\u8fd4\u56deNone\u3002 \"\"\" assert len ( close ) >= 60 , \"must provide an array with at least 60 length!\" if close . dtype != np . float64 : close = close . astype ( np . float64 ) rsi = ta . RSI ( close , 6 ) if thresh is None : std = np . std ( close [ - 59 :] / close [ - 60 : - 1 ] - 1 ) thresh = ( 2 * std , - 2 * std ) pivots = peak_valley_pivots ( close , thresh [ 0 ], thresh [ 1 ]) pivots [ 0 ], pivots [ - 1 ] = 0 , 0 length = len ( close ) valley_index = np . 
where (( pivots == - 1 ) & ( rsi <= rsi_limit ))[ 0 ] if len ( valley_index ) >= 2 : if ( close [ valley_index [ - 1 ]] < close [ valley_index [ - 2 ]]) and ( rsi [ valley_index [ - 1 ]] > rsi [ valley_index [ - 2 ]] ): bottom_dev_distance = length - 1 - valley_index [ - 1 ] return bottom_dev_distance rsi_predict_price ( close , thresh = None ) \u00b6 \u7ed9\u5b9a\u4e00\u6bb5\u884c\u60c5\uff0c\u6839\u636e\u6700\u8fd1\u7684\u4e24\u4e2aRSI\u7684\u6781\u5c0f\u503c\u548c\u6781\u5927\u503c\u9884\u6d4b\u4e0b\u4e00\u4e2a\u5468\u671f\u53ef\u80fd\u8fbe\u5230\u7684\u6700\u4f4e\u4ef7\u683c\u548c\u6700\u9ad8\u4ef7\u683c\u3002 \u5176\u539f\u7406\u662f\uff0c\u4ee5\u9884\u6d4b\u6700\u8fd1\u7684\u4e24\u4e2a\u6700\u9ad8\u4ef7\u548c\u6700\u4f4e\u4ef7\uff0c\u6c42\u51fa\u5176\u76f8\u5bf9\u5e94\u7684RSI\u503c\uff0c\u6c42\u51fa\u6700\u9ad8\u4ef7\u548c\u6700\u4f4e\u4ef7RSI\u7684\u5747\u503c\uff0c \u82e5\u53ea\u6709\u4e00\u4e2a\u5219\u53d6\u6700\u8fd1\u7684\u4e00\u4e2a\u3002\u518d\u7531RSI\u516c\u5f0f\uff0c\u53cd\u63a8\u4ef7\u683c\u3002\u6b64\u65f6\u8fd4\u56de\u503c\u4e3a(None, float)\uff0c\u5373\u53ea\u6709\u6700\u9ad8\u4ef7\uff0c\u6ca1\u6709\u6700\u4f4e\u4ef7\u3002\u53cd\u4e4b\u4ea6\u7136\u3002 Parameters: Name Type Description Default close np.ndarray \u5177\u6709\u65f6\u95f4\u5e8f\u5217\u7684\u6536\u76d8\u4ef7 required thresh Tuple[float, float]) \u8bf7\u53c2\u8003 peaks_and_valleys None Returns: Type Description Tuple[float, float] \u8fd4\u56de\u6570\u7ec4[predicted_low_price, predicted_high_price], \u6570\u7ec4\u7b2c\u4e00\u4e2a\u503c\u4e3a\u5229\u7528\u8fbe\u5230\u4e4b\u524d\u6700\u4f4e\u6536\u76d8\u4ef7\u7684RSI\u9884\u6d4b\u7684\u6700\u4f4e\u4ef7\u3002 \u7b2c\u4e8c\u4e2a\u503c\u4e3a\u5229\u7528\u8fbe\u5230\u4e4b\u524d\u6700\u9ad8\u6536\u76d8\u4ef7\u7684RSI\u9884\u6d4b\u7684\u6700\u9ad8\u4ef7\u3002 Source code in omicron/talib/morph.py def rsi_predict_price ( close : np . ndarray , thresh : Tuple [ float , float ] = None ) -> Tuple [ float , float ]: \"\"\"\u7ed9\u5b9a\u4e00\u6bb5\u884c\u60c5\uff0c\u6839\u636e\u6700\u8fd1\u7684\u4e24\u4e2aRSI\u7684\u6781\u5c0f\u503c\u548c\u6781\u5927\u503c\u9884\u6d4b\u4e0b\u4e00\u4e2a\u5468\u671f\u53ef\u80fd\u8fbe\u5230\u7684\u6700\u4f4e\u4ef7\u683c\u548c\u6700\u9ad8\u4ef7\u683c\u3002 \u5176\u539f\u7406\u662f\uff0c\u4ee5\u9884\u6d4b\u6700\u8fd1\u7684\u4e24\u4e2a\u6700\u9ad8\u4ef7\u548c\u6700\u4f4e\u4ef7\uff0c\u6c42\u51fa\u5176\u76f8\u5bf9\u5e94\u7684RSI\u503c\uff0c\u6c42\u51fa\u6700\u9ad8\u4ef7\u548c\u6700\u4f4e\u4ef7RSI\u7684\u5747\u503c\uff0c \u82e5\u53ea\u6709\u4e00\u4e2a\u5219\u53d6\u6700\u8fd1\u7684\u4e00\u4e2a\u3002\u518d\u7531RSI\u516c\u5f0f\uff0c\u53cd\u63a8\u4ef7\u683c\u3002\u6b64\u65f6\u8fd4\u56de\u503c\u4e3a(None, float)\uff0c\u5373\u53ea\u6709\u6700\u9ad8\u4ef7\uff0c\u6ca1\u6709\u6700\u4f4e\u4ef7\u3002\u53cd\u4e4b\u4ea6\u7136\u3002 Args: close (np.ndarray): \u5177\u6709\u65f6\u95f4\u5e8f\u5217\u7684\u6536\u76d8\u4ef7 thresh (Tuple[float, float]) : \u8bf7\u53c2\u8003[peaks_and_valleys][omicron.talib.morph.peaks_and_valleys] Returns: \u8fd4\u56de\u6570\u7ec4[predicted_low_price, predicted_high_price], \u6570\u7ec4\u7b2c\u4e00\u4e2a\u503c\u4e3a\u5229\u7528\u8fbe\u5230\u4e4b\u524d\u6700\u4f4e\u6536\u76d8\u4ef7\u7684RSI\u9884\u6d4b\u7684\u6700\u4f4e\u4ef7\u3002 \u7b2c\u4e8c\u4e2a\u503c\u4e3a\u5229\u7528\u8fbe\u5230\u4e4b\u524d\u6700\u9ad8\u6536\u76d8\u4ef7\u7684RSI\u9884\u6d4b\u7684\u6700\u9ad8\u4ef7\u3002 \"\"\" assert len ( close ) >= 60 , \"must provide an array with at least 60 length!\" if thresh is None : std = np . 
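A sketch of `rsi_bottom_divergent` on hypothetical data shaped to contain a lower low in price with a higher RSI6 low, which is the bullish divergence the helper looks for. Same installation and import assumptions as the sketch above; the concrete numbers are illustrative only.

```python
import numpy as np
from omicron.talib.morph import rsi_bottom_divergent

# two sell-offs, the second making a lower low in price after a partial rebound;
# RSI6 at the second low is typically higher, which is the divergence being tested
close = np.concatenate([
    np.linspace(11.0, 10.0, 20),   # first leg down
    np.linspace(10.0, 10.8, 15),   # partial rebound
    np.linspace(10.8, 9.7, 15),    # lower low in price
    np.linspace(9.7, 10.2, 10),    # small bounce confirming the second valley
])

d = rsi_bottom_divergent(close)   # rsi_limit defaults to 30
print(d)  # with this shape a divergence is typically reported about 10 bars back; None if none qualifies
```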
std ( close [ - 59 :] / close [ - 60 : - 1 ] - 1 ) thresh = ( 2 * std , - 2 * std ) if close . dtype != np . float64 : close = close . astype ( np . float64 ) valley_rsi , peak_rsi , _ = rsi_watermarks ( close , thresh = thresh ) pivot = peak_valley_pivots ( close , thresh [ 0 ], thresh [ 1 ]) pivot [ 0 ], pivot [ - 1 ] = 0 , 0 # \u6390\u5934\u53bb\u5c3e price_change = pd . Series ( close ) . diff ( 1 ) . values ave_price_change = ( abs ( price_change )[ - 6 :] . mean ()) * 5 ave_price_raise = ( np . maximum ( price_change , 0 )[ - 6 :] . mean ()) * 5 if valley_rsi is not None : predicted_low_change = ( ave_price_change ) - ave_price_raise / ( 0.01 * valley_rsi ) if predicted_low_change > 0 : predicted_low_change = 0 predicted_low_price = close [ - 1 ] + predicted_low_change else : predicted_low_price = None if peak_rsi is not None : predicted_high_change = ( ave_price_raise - ave_price_change ) / ( 0.01 * peak_rsi - 1 ) - ave_price_change if predicted_high_change < 0 : predicted_high_change = 0 predicted_high_price = close [ - 1 ] + predicted_high_change else : predicted_high_price = None return predicted_low_price , predicted_high_price rsi_top_distance ( close , thresh = None ) \u00b6 \u6839\u636e\u7ed9\u5b9a\u7684\u6536\u76d8\u4ef7\uff0c\u8ba1\u7b97\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5230\u4e0a\u4e00\u4e2a\u53d1\u51farsi\u9ad8\u6c34\u5e73\u7684\u8ddd\u79bb\uff0c \u5982\u679c\u4ece\u4e0a\u4e00\u4e2a\u6700\u9ad8\u70b9rsi\u5230\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5e76\u672a\u53d1\u51fa\u9ad8\u6c34\u5e73\u4fe1\u53f7\uff0c \u8fd4\u56de\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5230\u4e0a\u4e00\u4e2a\u53d1\u51fa\u6700\u9ad8\u70b9rsi\u7684\u8ddd\u79bb\u3002 \u5176\u4e2dclose\u7684\u957f\u5ea6\u4e00\u822c\u4e0d\u5c0f\u4e8e60\u3002 \u8fd4\u56de\u503c\u4e3a\u8ddd\u79bb\u6574\u6570\uff0c\u4e0d\u6ee1\u8db3\u6761\u4ef6\u5219\u8fd4\u56deNone\u3002 Parameters: Name Type Description Default close np.array \u5177\u6709\u65f6\u95f4\u5e8f\u5217\u7684\u6536\u76d8\u4ef7 required thresh Tuple[float, float]) None\u9002\u7528\u6240\u6709\u80a1\u7968\uff0c\u4e0d\u5fc5\u66f4\u6539\uff0c\u4e5f\u53ef\u81ea\u884c\u8bbe\u7f6e\u3002 None Returns: Type Description int \u8fd4\u56de\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5230\u4e0a\u4e00\u4e2a\u53d1\u51farsi\u9ad8\u6c34\u5e73\u7684\u8ddd\u79bb\u3002 \u5982\u679c\u4ece\u4e0a\u4e00\u4e2a\u6700\u9ad8\u70b9rsi\u5230\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5e76\u672a\u53d1\u51fa\u9ad8\u6c34\u5e73\u4fe1\u53f7\uff0c \u8fd4\u56de\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5230\u4e0a\u4e00\u4e2a\u53d1\u51fa\u6700\u9ad8\u70b9rsi\u7684\u8ddd\u79bb\u3002 \u9664\u6b64\u4e4b\u5916\uff0c\u8fd4\u56deNone\u3002 Source code in omicron/talib/morph.py def rsi_top_distance ( close : np . 
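A short sketch of `rsi_predict_price` on a seeded random walk (synthetic data, not from the original docs). Either side of the returned pair can be None when the window has no qualifying RSI valley or peak.

```python
import numpy as np
from omicron.talib.morph import rsi_predict_price

np.random.seed(1978)
close = 10.0 * np.cumprod(1 + np.random.randn(80) * 0.02)   # synthetic closes, len >= 60

low, high = rsi_predict_price(close)
print(low, high)   # predicted low/high for the next period; None on a side with no RSI extreme
```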
array , thresh : Tuple [ float , float ] = None ) -> int : \"\"\"\u6839\u636e\u7ed9\u5b9a\u7684\u6536\u76d8\u4ef7\uff0c\u8ba1\u7b97\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5230\u4e0a\u4e00\u4e2a\u53d1\u51farsi\u9ad8\u6c34\u5e73\u7684\u8ddd\u79bb\uff0c \u5982\u679c\u4ece\u4e0a\u4e00\u4e2a\u6700\u9ad8\u70b9rsi\u5230\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5e76\u672a\u53d1\u51fa\u9ad8\u6c34\u5e73\u4fe1\u53f7\uff0c \u8fd4\u56de\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5230\u4e0a\u4e00\u4e2a\u53d1\u51fa\u6700\u9ad8\u70b9rsi\u7684\u8ddd\u79bb\u3002 \u5176\u4e2dclose\u7684\u957f\u5ea6\u4e00\u822c\u4e0d\u5c0f\u4e8e60\u3002 \u8fd4\u56de\u503c\u4e3a\u8ddd\u79bb\u6574\u6570\uff0c\u4e0d\u6ee1\u8db3\u6761\u4ef6\u5219\u8fd4\u56deNone\u3002 Args: close (np.array): \u5177\u6709\u65f6\u95f4\u5e8f\u5217\u7684\u6536\u76d8\u4ef7 thresh (Tuple[float, float]) : None\u9002\u7528\u6240\u6709\u80a1\u7968\uff0c\u4e0d\u5fc5\u66f4\u6539\uff0c\u4e5f\u53ef\u81ea\u884c\u8bbe\u7f6e\u3002 Returns: \u8fd4\u56de\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5230\u4e0a\u4e00\u4e2a\u53d1\u51farsi\u9ad8\u6c34\u5e73\u7684\u8ddd\u79bb\u3002 \u5982\u679c\u4ece\u4e0a\u4e00\u4e2a\u6700\u9ad8\u70b9rsi\u5230\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5e76\u672a\u53d1\u51fa\u9ad8\u6c34\u5e73\u4fe1\u53f7\uff0c \u8fd4\u56de\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5230\u4e0a\u4e00\u4e2a\u53d1\u51fa\u6700\u9ad8\u70b9rsi\u7684\u8ddd\u79bb\u3002 \u9664\u6b64\u4e4b\u5916\uff0c\u8fd4\u56deNone\u3002\"\"\" assert len ( close ) >= 60 , \"must provide an array with at least 60 length!\" if close . dtype != np . float64 : close = close . astype ( np . float64 ) if thresh is None : std = np . std ( close [ - 59 :] / close [ - 60 : - 1 ] - 1 ) thresh = ( 2 * std , - 2 * std ) rsi = ta . RSI ( close , 6 ) watermarks = rsi_watermarks ( close , thresh ) if watermarks is not None : _ , high_watermark , _ = watermarks pivots = peak_valley_pivots ( close , thresh [ 0 ], thresh [ 1 ]) pivots [ 0 ], pivots [ - 1 ] = 0 , 0 # \u5cf0\u503cRSI>70 peak_rsi_index = np . where (( rsi > 70 ) & ( pivots == 1 ))[ 0 ] # RSI\u9ad8\u6c34\u5e73\u7684\u6700\u5c0f\u503c\uff1a\u9ad8\u6c34\u5e73*0.99 high_rsi_index = np . 
where ( rsi >= high_watermark * 0.99 )[ 0 ] if len ( peak_rsi_index ) > 0 : distance = len ( rsi ) - 1 - peak_rsi_index [ - 1 ] if len ( high_rsi_index ) > 0 : if high_rsi_index [ - 1 ] >= peak_rsi_index [ - 1 ]: distance = len ( rsi ) - 1 - high_rsi_index [ - 1 ] return distance rsi_top_divergent ( close , thresh = None , rsi_limit = 70 ) \u00b6 \u5bfb\u627e\u6700\u8fd1\u6ee1\u8db3\u6761\u4ef6\u7684rsi\u9876\u80cc\u79bb\u3002 \u8fd4\u56de\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5230\u6700\u8fd1\u9876\u80cc\u79bb\u53d1\u751f\u70b9\u7684\u8ddd\u79bb\uff1b\u6ca1\u6709\u6ee1\u8db3\u6761\u4ef6\u7684\u9876\u80cc\u79bb\uff0c\u8fd4\u56deNone\u3002 Parameters: Name Type Description Default close np.array \u65f6\u95f4\u5e8f\u5217\u6536\u76d8\u4ef7 required thresh Tuple[float, float] \u8bf7\u53c2\u8003 peaks_and_valleys None rsi_limit float RSI\u53d1\u751f\u9876\u80cc\u79bb\u65f6\u7684\u9608\u503c, \u9ed8\u8ba4\u503c70\uff0880\u6548\u679c\u66f4\u4f73\uff0c\u4f46\u662f\u68c0\u6d4b\u51fa\u6765\u6570\u91cf\u592a\u5c11\uff09\uff0c\u5373\u53ea\u8fc7\u6ee4RSI6>70\u7684\u5c40\u90e8\u6700\u9ad8\u6536\u76d8\u4ef7\u3002 70 Returns: Type Description Tuple[int, int] \u8fd4\u56deint\u7c7b\u578b\u7684\u6574\u6570\uff0c\u8868\u793a\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5230\u6700\u8fd1\u9876\u80cc\u79bb\u53d1\u751f\u70b9\u7684\u8ddd\u79bb\uff1b\u6ca1\u6709\u6ee1\u8db3\u6761\u4ef6\u7684\u9876\u80cc\u79bb\uff0c\u8fd4\u56deNone\u3002 Source code in omicron/talib/morph.py def rsi_top_divergent ( close : np . array , thresh : Tuple [ float , float ] = None , rsi_limit : float = 70 ) -> Tuple [ int , int ]: \"\"\"\u5bfb\u627e\u6700\u8fd1\u6ee1\u8db3\u6761\u4ef6\u7684rsi\u9876\u80cc\u79bb\u3002 \u8fd4\u56de\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5230\u6700\u8fd1\u9876\u80cc\u79bb\u53d1\u751f\u70b9\u7684\u8ddd\u79bb\uff1b\u6ca1\u6709\u6ee1\u8db3\u6761\u4ef6\u7684\u9876\u80cc\u79bb\uff0c\u8fd4\u56deNone\u3002 Args: close (np.array): \u65f6\u95f4\u5e8f\u5217\u6536\u76d8\u4ef7 thresh (Tuple[float, float]): \u8bf7\u53c2\u8003[peaks_and_valleys][omicron.talib.morph.peaks_and_valleys] rsi_limit (float, optional): RSI\u53d1\u751f\u9876\u80cc\u79bb\u65f6\u7684\u9608\u503c, \u9ed8\u8ba4\u503c70\uff0880\u6548\u679c\u66f4\u4f73\uff0c\u4f46\u662f\u68c0\u6d4b\u51fa\u6765\u6570\u91cf\u592a\u5c11\uff09\uff0c\u5373\u53ea\u8fc7\u6ee4RSI6>70\u7684\u5c40\u90e8\u6700\u9ad8\u6536\u76d8\u4ef7\u3002 Returns: \u8fd4\u56deint\u7c7b\u578b\u7684\u6574\u6570\uff0c\u8868\u793a\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5230\u6700\u8fd1\u9876\u80cc\u79bb\u53d1\u751f\u70b9\u7684\u8ddd\u79bb\uff1b\u6ca1\u6709\u6ee1\u8db3\u6761\u4ef6\u7684\u9876\u80cc\u79bb\uff0c\u8fd4\u56deNone\u3002 \"\"\" assert len ( close ) >= 60 , \"must provide an array with at least 60 length!\" if close . dtype != np . float64 : close = close . astype ( np . float64 ) rsi = ta . RSI ( close , 6 ) if thresh is None : std = np . std ( close [ - 59 :] / close [ - 60 : - 1 ] - 1 ) thresh = ( 2 * std , - 2 * std ) pivots = peak_valley_pivots ( close , thresh [ 0 ], thresh [ 1 ]) pivots [ 0 ], pivots [ - 1 ] = 0 , 0 length = len ( close ) peak_index = np . 
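The top-side helpers mirror the bottom-side ones. A sketch using the same hypothetical advance/sell-off/rebound series: the advance produces a peak with RSI6 above 70, so `rsi_top_distance` reports how many bars ago that high-RSI signal fired, while `rsi_top_divergent` returns None here because a top divergence needs two qualifying peaks. Assumes omicron with TA-Lib and zigzag installed.

```python
import numpy as np
from omicron.talib.morph import rsi_top_distance, rsi_top_divergent

close = np.concatenate([
    np.linspace(10.0, 12.0, 30),   # advance -> peak with RSI6 > 70
    np.linspace(12.0, 9.0, 15),    # sell-off
    np.linspace(9.0, 11.0, 15),    # rebound
])

print(rsi_top_distance(close))    # roughly 30 bars back to the last RSI-high signal
print(rsi_top_divergent(close))   # None: only one peak exists in this series
```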
where (( pivots == 1 ) & ( rsi >= rsi_limit ))[ 0 ] if len ( peak_index ) >= 2 : if ( close [ peak_index [ - 1 ]] > close [ peak_index [ - 2 ]]) and ( rsi [ peak_index [ - 1 ]] < rsi [ peak_index [ - 2 ]] ): top_dev_distance = length - 1 - peak_index [ - 1 ] return top_dev_distance rsi_watermarks ( close , thresh = None ) \u00b6 \u7ed9\u5b9a\u4e00\u6bb5\u884c\u60c5\u6570\u636e\u548c\u7528\u4ee5\u68c0\u6d4b\u9876\u548c\u5e95\u7684\u9608\u503c\uff0c\u8fd4\u56de\u8be5\u6bb5\u884c\u60c5\u4e2d\uff0c\u8c37\u548c\u5cf0\u5904RSI\u5747\u503c\uff0c\u6700\u540e\u4e00\u4e2aRSI6\u503c\u3002 \u5176\u4e2dclose\u7684\u957f\u5ea6\u4e00\u822c\u4e0d\u5c0f\u4e8e60\uff0c\u4e0d\u5927\u4e8e120\u3002\u8fd4\u56de\u503c\u4e2d\uff0c\u4e00\u4e2a\u4e3alow_wartermark\uff08\u8c37\u5e95\u5904RSI\u503c\uff09\uff0c \u4e00\u4e2a\u4e3ahigh_wartermark\uff08\u9ad8\u5cf0\u5904RSI\u503c)\uff0c\u4e00\u4e2a\u4e3aRSI6\u7684\u6700\u540e\u4e00\u4e2a\u503c\uff0c\u7528\u4ee5\u5bf9\u6bd4\u524d\u4e24\u4e2a\u8b66\u6212\u503c\u3002 Parameters: Name Type Description Default close np.array \u5177\u6709\u65f6\u95f4\u5e8f\u5217\u7684\u6536\u76d8\u4ef7 required thresh Tuple[float, float]) None\u9002\u7528\u6240\u6709\u80a1\u7968\uff0c\u4e0d\u5fc5\u66f4\u6539\uff0c\u4e5f\u53ef\u81ea\u884c\u8bbe\u7f6e\u3002 None Returns: Type Description Tuple[float, float, float] \u8fd4\u56de\u6570\u7ec4[low_watermark, high_watermark\uff0c rsi[-1]], \u7b2c\u4e00\u4e2a\u4e3a\u6700\u8fd1\u4e24\u4e2a\u6700\u4f4e\u6536\u76d8\u4ef7\u7684RSI\u5747\u503c\uff0c \u7b2c\u4e8c\u4e2a\u4e3a\u6700\u8fd1\u4e24\u4e2a\u6700\u9ad8\u6536\u76d8\u4ef7\u7684RSI\u5747\u503c\u3002 \u82e5\u4f20\u5165\u6536\u76d8\u4ef7\u53ea\u6709\u4e00\u4e2a\u6700\u503c\uff0c\u53ea\u8fd4\u56de\u4e00\u4e2a\u3002\u6ca1\u6709\u6700\u503c\uff0c\u5219\u8fd4\u56deNone, \u7b2c\u4e09\u4e2a\u4e3a\u5b9e\u9645\u7684\u6700\u540eRSI6\u7684\u503c\u3002 Source code in omicron/talib/morph.py def rsi_watermarks ( close : np . 
array , thresh : Tuple [ float , float ] = None ) -> Tuple [ float , float , float ]: \"\"\"\u7ed9\u5b9a\u4e00\u6bb5\u884c\u60c5\u6570\u636e\u548c\u7528\u4ee5\u68c0\u6d4b\u9876\u548c\u5e95\u7684\u9608\u503c\uff0c\u8fd4\u56de\u8be5\u6bb5\u884c\u60c5\u4e2d\uff0c\u8c37\u548c\u5cf0\u5904RSI\u5747\u503c\uff0c\u6700\u540e\u4e00\u4e2aRSI6\u503c\u3002 \u5176\u4e2dclose\u7684\u957f\u5ea6\u4e00\u822c\u4e0d\u5c0f\u4e8e60\uff0c\u4e0d\u5927\u4e8e120\u3002\u8fd4\u56de\u503c\u4e2d\uff0c\u4e00\u4e2a\u4e3alow_wartermark\uff08\u8c37\u5e95\u5904RSI\u503c\uff09\uff0c \u4e00\u4e2a\u4e3ahigh_wartermark\uff08\u9ad8\u5cf0\u5904RSI\u503c)\uff0c\u4e00\u4e2a\u4e3aRSI6\u7684\u6700\u540e\u4e00\u4e2a\u503c\uff0c\u7528\u4ee5\u5bf9\u6bd4\u524d\u4e24\u4e2a\u8b66\u6212\u503c\u3002 Args: close (np.array): \u5177\u6709\u65f6\u95f4\u5e8f\u5217\u7684\u6536\u76d8\u4ef7 thresh (Tuple[float, float]) : None\u9002\u7528\u6240\u6709\u80a1\u7968\uff0c\u4e0d\u5fc5\u66f4\u6539\uff0c\u4e5f\u53ef\u81ea\u884c\u8bbe\u7f6e\u3002 Returns: \u8fd4\u56de\u6570\u7ec4[low_watermark, high_watermark\uff0c rsi[-1]], \u7b2c\u4e00\u4e2a\u4e3a\u6700\u8fd1\u4e24\u4e2a\u6700\u4f4e\u6536\u76d8\u4ef7\u7684RSI\u5747\u503c\uff0c \u7b2c\u4e8c\u4e2a\u4e3a\u6700\u8fd1\u4e24\u4e2a\u6700\u9ad8\u6536\u76d8\u4ef7\u7684RSI\u5747\u503c\u3002 \u82e5\u4f20\u5165\u6536\u76d8\u4ef7\u53ea\u6709\u4e00\u4e2a\u6700\u503c\uff0c\u53ea\u8fd4\u56de\u4e00\u4e2a\u3002\u6ca1\u6709\u6700\u503c\uff0c\u5219\u8fd4\u56deNone, \u7b2c\u4e09\u4e2a\u4e3a\u5b9e\u9645\u7684\u6700\u540eRSI6\u7684\u503c\u3002 \"\"\" assert len ( close ) >= 60 , \"must provide an array with at least 60 length!\" if thresh is None : std = np . std ( close [ - 59 :] / close [ - 60 : - 1 ] - 1 ) thresh = ( 2 * std , - 2 * std ) if close . dtype != np . float64 : close = close . astype ( np . float64 ) rsi = ta . RSI ( close , 6 ) pivots = peak_valley_pivots ( close , thresh [ 0 ], thresh [ 1 ]) pivots [ 0 ], pivots [ - 1 ] = 0 , 0 # \u6390\u5934\u53bb\u5c3e # \u5cf0\u503cRSI>70; \u8c37\u5904\u7684RSI<30; peaks_rsi_index = np . where (( rsi > 70 ) & ( pivots == 1 ))[ 0 ] valleys_rsi_index = np . where (( rsi < 30 ) & ( pivots == - 1 ))[ 0 ] if len ( peaks_rsi_index ) == 0 : high_watermark = None elif len ( peaks_rsi_index ) == 1 : high_watermark = rsi [ peaks_rsi_index [ 0 ]] else : # \u6709\u4e24\u4e2a\u4ee5\u4e0a\u7684\u5cf0\uff0c\u901a\u8fc7\u6700\u8fd1\u7684\u4e24\u4e2a\u5cf0\u5747\u503c\u6765\u786e\u5b9a\u8d70\u52bf high_watermark = np . nanmean ( rsi [ peaks_rsi_index [ - 2 :]]) if len ( valleys_rsi_index ) == 0 : low_watermark = None elif len ( valleys_rsi_index ) == 1 : low_watermark = rsi [ valleys_rsi_index [ 0 ]] else : # \u6709\u4e24\u4e2a\u4ee5\u4e0a\u7684\u5cf0\uff0c\u901a\u8fc7\u6700\u8fd1\u7684\u4e24\u4e2a\u5cf0\u6765\u786e\u5b9a\u8d70\u52bf low_watermark = np . nanmean ( rsi [ valleys_rsi_index [ - 2 :]]) return low_watermark , high_watermark , rsi [ - 1 ] support_resist_lines ( ts , upthres = None , downthres = None ) \u00b6 \u8ba1\u7b97\u65f6\u95f4\u5e8f\u5217\u7684\u652f\u6491\u7ebf\u548c\u963b\u529b\u7ebf \u4f7f\u7528\u6700\u8fd1\u7684\u4e24\u4e2a\u9ad8\u70b9\u8fde\u63a5\u6210\u9634\u529b\u7ebf\uff0c\u4e24\u4e2a\u4f4e\u70b9\u8fde\u63a5\u6210\u652f\u6491\u7ebf\u3002 Examples: 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 def show_support_resist_lines ( ts ): import plotly.graph_objects as go fig = go . Figure () support , resist , x_start = support_resist_lines ( ts , 0.03 , - 0.03 ) fig . add_trace ( go . Scatter ( x = np . arange ( len ( ts )), y = ts )) x = np . 
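A sketch of `rsi_watermarks` (illustrative data, not from the original docs): the peak of the advance yields the high watermark, the sell-off low yields the low watermark, and the third value is the latest RSI6 for comparison against both.

```python
import numpy as np
from omicron.talib.morph import rsi_watermarks

close = np.concatenate([
    np.linspace(10.0, 12.0, 30),
    np.linspace(12.0, 9.0, 15),
    np.linspace(9.0, 11.0, 15),
])

low_wm, high_wm, last_rsi = rsi_watermarks(close)   # thresh defaults to +/- 2 std of returns
print(low_wm, high_wm, last_rsi)  # low/high may be None when no valley/peak with RSI < 30 / > 70 exists
```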
arange ( len ( ts ))[ x_start :] fig . add_trace ( go . Line ( x = x , y = support ( x ))) fig . add_trace ( go . Line ( x = x , y = resist ( x ))) fig . show () np . random . seed ( 1978 ) X = np . cumprod ( 1 + np . random . randn ( 100 ) * 0.01 ) show_support_resist_lines ( X ) the above code will show this Parameters: Name Type Description Default ts np.ndarray \u65f6\u95f4\u5e8f\u5217 required upthres float \u8bf7\u53c2\u8003 peaks_and_valleys None downthres float \u8bf7\u53c2\u8003 peaks_and_valleys None Returns: Type Description Tuple[Callable, Callable, numpy.ndarray] \u8fd4\u56de\u652f\u6491\u7ebf\u548c\u963b\u529b\u7ebf\u7684\u8ba1\u7b97\u51fd\u6570\u53ca\u8d77\u59cb\u70b9\u5750\u6807\uff0c\u5982\u679c\u6ca1\u6709\u652f\u6491\u7ebf\u6216\u963b\u529b\u7ebf\uff0c\u5219\u8fd4\u56deNone Source code in omicron/talib/morph.py def support_resist_lines ( ts : np . ndarray , upthres : float = None , downthres : float = None ) -> Tuple [ Callable , Callable , np . ndarray ]: \"\"\"\u8ba1\u7b97\u65f6\u95f4\u5e8f\u5217\u7684\u652f\u6491\u7ebf\u548c\u963b\u529b\u7ebf \u4f7f\u7528\u6700\u8fd1\u7684\u4e24\u4e2a\u9ad8\u70b9\u8fde\u63a5\u6210\u9634\u529b\u7ebf\uff0c\u4e24\u4e2a\u4f4e\u70b9\u8fde\u63a5\u6210\u652f\u6491\u7ebf\u3002 Examples: ```python def show_support_resist_lines(ts): import plotly.graph_objects as go fig = go.Figure() support, resist, x_start = support_resist_lines(ts, 0.03, -0.03) fig.add_trace(go.Scatter(x=np.arange(len(ts)), y=ts)) x = np.arange(len(ts))[x_start:] fig.add_trace(go.Line(x=x, y = support(x))) fig.add_trace(go.Line(x=x, y = resist(x))) fig.show() np.random.seed(1978) X = np.cumprod(1 + np.random.randn(100) * 0.01) show_support_resist_lines(X) ``` the above code will show this ![](https://images.jieyu.ai/images/202204/support_resist.png) Args: ts (np.ndarray): \u65f6\u95f4\u5e8f\u5217 upthres (float, optional): \u8bf7\u53c2\u8003[peaks_and_valleys][omicron.talib.morph.peaks_and_valleys] downthres (float, optional): \u8bf7\u53c2\u8003[peaks_and_valleys][omicron.talib.morph.peaks_and_valleys] Returns: \u8fd4\u56de\u652f\u6491\u7ebf\u548c\u963b\u529b\u7ebf\u7684\u8ba1\u7b97\u51fd\u6570\u53ca\u8d77\u59cb\u70b9\u5750\u6807\uff0c\u5982\u679c\u6ca1\u6709\u652f\u6491\u7ebf\u6216\u963b\u529b\u7ebf\uff0c\u5219\u8fd4\u56deNone \"\"\" if ts . dtype != np . float64 : ts = ts . astype ( np . float64 ) pivots = peaks_and_valleys ( ts , upthres , downthres ) pivots [ 0 ] = 0 pivots [ - 1 ] = 0 arg_max = np . argwhere ( pivots == 1 ) . flatten () arg_min = np . argwhere ( pivots == - 1 ) . flatten () resist = None support = None if len ( arg_max ) >= 2 : arg_max = arg_max [ - 2 :] y = ts [ arg_max ] coeff = np . polyfit ( arg_max , y , deg = 1 ) resist = np . poly1d ( coeff ) if len ( arg_min ) >= 2 : arg_min = arg_min [ - 2 :] y = ts [ arg_min ] coeff = np . polyfit ( arg_min , y , deg = 1 ) support = np . poly1d ( coeff ) return support , resist , np . 
min ([ * arg_min , * arg_max ]) valley_detect ( close , thresh = ( 0.05 , - 0.02 )) \u00b6 \u7ed9\u5b9a\u4e00\u6bb5\u884c\u60c5\u6570\u636e\u548c\u7528\u4ee5\u68c0\u6d4b\u8fd1\u671f\u5df2\u53d1\u751f\u53cd\u8f6c\u7684\u6700\u4f4e\u70b9\uff0c\u8fd4\u56de\u8be5\u6bb5\u884c\u60c5\u4e2d\uff0c\u6700\u4f4e\u70b9\u5230\u6700\u540e\u4e00\u4e2a\u6570\u636e\u7684\u8ddd\u79bb\u548c\u6536\u76ca\u7387\u6570\u7ec4\uff0c \u5982\u679c\u7ed9\u5b9a\u884c\u60c5\u4e2d\u672a\u627e\u5230\u6ee1\u8db3\u53c2\u6570\u7684\u6700\u4f4e\u70b9\uff0c\u5219\u8fd4\u56de\u4e24\u4e2a\u7a7a\u503c\u6570\u7ec4\u3002 \u5176\u4e2dbars\u7684\u957f\u5ea6\u4e00\u822c\u4e0d\u5c0f\u4e8e60\uff0c\u4e0d\u5927\u4e8e120\u3002\u6b64\u51fd\u6570\u91c7\u7528\u4e86zigzag\u4e2d\u7684\u8c37\u5cf0\u68c0\u6d4b\u65b9\u6cd5\uff0c\u5176\u4e2d\u53c2\u6570\u9ed8\u8ba4(0.05,-0.02), \u6b64\u53c2\u6570\u5bf9\u6240\u6709\u80a1\u7968\u6570\u636e\u90fd\u9002\u7528\u3002\u82e5\u6ee1\u8db3\u53c2\u6570\uff0c\u8fd4\u56de\u503c\u4e2d\uff0c\u8ddd\u79bb\u4e3a\u5927\u4e8e0\u7684\u6574\u6570\uff0c\u6536\u76ca\u7387\u662f0~1\u7684\u5c0f\u6570\u3002 Parameters: Name Type Description Default close np.ndarray \u5177\u6709\u65f6\u95f4\u5e8f\u5217\u7684\u6536\u76d8\u4ef7 required thresh Tuple[float, float]) \u8bf7\u53c2\u8003 peaks_and_valleys (0.05, -0.02) Returns: Type Description int \u8fd4\u56de\u8be5\u6bb5\u884c\u60c5\u4e2d\uff0c\u6700\u4f4e\u70b9\u5230\u6700\u540e\u4e00\u4e2a\u6570\u636e\u7684\u8ddd\u79bb\u548c\u6536\u76ca\u7387\u6570\u7ec4\uff0c \u5982\u679c\u7ed9\u5b9a\u884c\u60c5\u4e2d\u672a\u627e\u5230\u6ee1\u8db3\u53c2\u6570\u7684\u6700\u4f4e\u70b9\uff0c\u5219\u8fd4\u56de\u4e24\u4e2a\u7a7a\u503c\u6570\u7ec4\u3002 Source code in omicron/talib/morph.py def valley_detect ( close : np . ndarray , thresh : Tuple [ float , float ] = ( 0.05 , - 0.02 ) ) -> int : \"\"\"\u7ed9\u5b9a\u4e00\u6bb5\u884c\u60c5\u6570\u636e\u548c\u7528\u4ee5\u68c0\u6d4b\u8fd1\u671f\u5df2\u53d1\u751f\u53cd\u8f6c\u7684\u6700\u4f4e\u70b9\uff0c\u8fd4\u56de\u8be5\u6bb5\u884c\u60c5\u4e2d\uff0c\u6700\u4f4e\u70b9\u5230\u6700\u540e\u4e00\u4e2a\u6570\u636e\u7684\u8ddd\u79bb\u548c\u6536\u76ca\u7387\u6570\u7ec4\uff0c \u5982\u679c\u7ed9\u5b9a\u884c\u60c5\u4e2d\u672a\u627e\u5230\u6ee1\u8db3\u53c2\u6570\u7684\u6700\u4f4e\u70b9\uff0c\u5219\u8fd4\u56de\u4e24\u4e2a\u7a7a\u503c\u6570\u7ec4\u3002 \u5176\u4e2dbars\u7684\u957f\u5ea6\u4e00\u822c\u4e0d\u5c0f\u4e8e60\uff0c\u4e0d\u5927\u4e8e120\u3002\u6b64\u51fd\u6570\u91c7\u7528\u4e86zigzag\u4e2d\u7684\u8c37\u5cf0\u68c0\u6d4b\u65b9\u6cd5\uff0c\u5176\u4e2d\u53c2\u6570\u9ed8\u8ba4(0.05,-0.02), \u6b64\u53c2\u6570\u5bf9\u6240\u6709\u80a1\u7968\u6570\u636e\u90fd\u9002\u7528\u3002\u82e5\u6ee1\u8db3\u53c2\u6570\uff0c\u8fd4\u56de\u503c\u4e2d\uff0c\u8ddd\u79bb\u4e3a\u5927\u4e8e0\u7684\u6574\u6570\uff0c\u6536\u76ca\u7387\u662f0~1\u7684\u5c0f\u6570\u3002 Args: close (np.ndarray): \u5177\u6709\u65f6\u95f4\u5e8f\u5217\u7684\u6536\u76d8\u4ef7 thresh (Tuple[float, float]) : \u8bf7\u53c2\u8003[peaks_and_valleys][omicron.talib.morph.peaks_and_valleys] Returns: \u8fd4\u56de\u8be5\u6bb5\u884c\u60c5\u4e2d\uff0c\u6700\u4f4e\u70b9\u5230\u6700\u540e\u4e00\u4e2a\u6570\u636e\u7684\u8ddd\u79bb\u548c\u6536\u76ca\u7387\u6570\u7ec4\uff0c \u5982\u679c\u7ed9\u5b9a\u884c\u60c5\u4e2d\u672a\u627e\u5230\u6ee1\u8db3\u53c2\u6570\u7684\u6700\u4f4e\u70b9\uff0c\u5219\u8fd4\u56de\u4e24\u4e2a\u7a7a\u503c\u6570\u7ec4\u3002 \"\"\" assert len ( close ) >= 60 , \"must provide an array with at least 60 length!\" if close . dtype != np . float64 : close = close . astype ( np . 
float64 ) if thresh is None : std = np . std ( close [ - 59 :] / close [ - 60 : - 1 ] - 1 ) thresh = ( 2 * std , - 2 * std ) pivots = peak_valley_pivots ( close , thresh [ 0 ], thresh [ 1 ]) flags = pivots [ pivots != 0 ] increased = None lowest_distance = None if ( flags [ - 2 ] == - 1 ) and ( flags [ - 1 ] == 1 ): length = len ( pivots ) valley_index = np . where ( pivots == - 1 )[ 0 ] increased = ( close [ - 1 ] - close [ valley_index [ - 1 ]]) / close [ valley_index [ - 1 ]] lowest_distance = int ( length - 1 - valley_index [ - 1 ]) return lowest_distance , increased vcross ( f , g ) \u00b6 \u5224\u65ad\u5e8f\u5217f\u662f\u5426\u4e0eg\u5b58\u5728\u7c7b\u578bv\u578b\u7684\u76f8\u4ea4\u3002\u5373\u5b58\u5728\u4e24\u4e2a\u4ea4\u70b9\uff0c\u7b2c\u4e00\u4e2a\u4ea4\u70b9\u4e3a\u5411\u4e0b\u76f8\u4ea4\uff0c\u7b2c\u4e8c\u4e2a\u4ea4\u70b9\u4e3a\u5411\u4e0a \u76f8\u4ea4\u3002\u4e00\u822c\u53cd\u6620\u4e3a\u6d17\u76d8\u62c9\u5347\u7684\u7279\u5f81\u3002 Examples: >>> f = np . array ([ 3 * i ** 2 - 20 * i + 2 for i in range ( 10 )]) >>> g = np . array ([ i - 5 for i in range ( 10 )]) >>> flag , indices = vcross ( f , g ) >>> assert flag is True >>> assert indices [ 0 ] == 0 >>> assert indices [ 1 ] == 6 Parameters: Name Type Description Default f first sequence required g the second sequence required Returns: Type Description Tuple (flag, indices), \u5176\u4e2dflag\u53d6\u503c\u4e3aTrue\u65f6\uff0c\u5b58\u5728vcross\uff0cindices\u4e3a\u4ea4\u70b9\u7684\u7d22\u5f15\u3002 Source code in omicron/talib/morph.py def vcross ( f : np . array , g : np . array ) -> Tuple : \"\"\"\u5224\u65ad\u5e8f\u5217f\u662f\u5426\u4e0eg\u5b58\u5728\u7c7b\u578bv\u578b\u7684\u76f8\u4ea4\u3002\u5373\u5b58\u5728\u4e24\u4e2a\u4ea4\u70b9\uff0c\u7b2c\u4e00\u4e2a\u4ea4\u70b9\u4e3a\u5411\u4e0b\u76f8\u4ea4\uff0c\u7b2c\u4e8c\u4e2a\u4ea4\u70b9\u4e3a\u5411\u4e0a \u76f8\u4ea4\u3002\u4e00\u822c\u53cd\u6620\u4e3a\u6d17\u76d8\u62c9\u5347\u7684\u7279\u5f81\u3002 Examples: >>> f = np.array([ 3 * i ** 2 - 20 * i + 2 for i in range(10)]) >>> g = np.array([ i - 5 for i in range(10)]) >>> flag, indices = vcross(f, g) >>> assert flag is True >>> assert indices[0] == 0 >>> assert indices[1] == 6 Args: f: first sequence g: the second sequence Returns: (flag, indices), \u5176\u4e2dflag\u53d6\u503c\u4e3aTrue\u65f6\uff0c\u5b58\u5728vcross\uff0cindices\u4e3a\u4ea4\u70b9\u7684\u7d22\u5f15\u3002 \"\"\" indices = np . argwhere ( np . diff ( np . sign ( f - g ))) . flatten () if len ( indices ) == 2 : idx0 , idx1 = indices if f [ idx0 ] > g [ idx0 ] and f [ idx1 ] < g [ idx1 ]: return True , ( idx0 , idx1 ) return False , ( None , None )","title":"talib"},{"location":"api/talib/#omicron.talib.core","text":"","title":"core"},{"location":"api/talib/#omicron.talib.core.angle","text":"\u6c42\u65f6\u95f4\u5e8f\u5217 ts \u62df\u5408\u76f4\u7ebf\u76f8\u5bf9\u4e8e x \u8f74\u7684\u5939\u89d2\u7684\u4f59\u5f26\u503c \u672c\u51fd\u6570\u53ef\u4ee5\u7528\u6765\u5224\u65ad\u65f6\u95f4\u5e8f\u5217\u7684\u589e\u957f\u8d8b\u52bf\u3002\u5f53 angle \u5904\u4e8e[-1, 0]\u65f6\uff0c\u8d8a\u9760\u8fd10\uff0c\u4e0b\u964d\u8d8a\u5feb\uff1b\u5f53 angle \u5904\u4e8e[0, 1]\u65f6\uff0c\u8d8a\u63a5\u8fd10\uff0c\u4e0a\u5347\u8d8a\u5feb\u3002 \u5982\u679c ts \u65e0\u6cd5\u5f88\u597d\u5730\u62df\u5408\u4e3a\u76f4\u7ebf\uff0c\u5219\u8fd4\u56de[float, None] Examples: >>> ts = np . array ([ i for i in range ( 5 )]) >>> round ( angle ( ts )[ 1 ], 3 ) # degree: 45, rad: pi/2 0.707 >>> ts = np . array ([ np . 
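A sketch of `valley_detect` on hypothetical data: a 25% decline followed by a roughly 10% rebound, which is enough to flip the last zigzag leg upward under the default thresh of (0.05, -0.02).

```python
import numpy as np
from omicron.talib.morph import valley_detect

close = np.concatenate([
    np.linspace(12.0, 9.0, 40),    # decline deep enough to register a valley pivot
    np.linspace(9.0, 9.9, 20),     # ~10% rebound off the low
])

distance, increased = valley_detect(close)   # default thresh=(0.05, -0.02)
print(distance, increased)  # roughly 19-20 bars since the low and ~0.10 gain; (None, None) if no reversal
```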
sqrt ( 3 ) / 3 * i for i in range ( 10 )]) >>> round ( angle ( ts )[ 1 ], 3 ) # degree: 30, rad: pi/6 0.866 >>> ts = np . array ([ - np . sqrt ( 3 ) / 3 * i for i in range ( 7 )]) >>> round ( angle ( ts )[ 1 ], 3 ) # degree: 150, rad: 5*pi/6 - 0.866 Parameters: Name Type Description Default ts required Returns: Type Description Tuple[float, float] \u8fd4\u56de (error, consine(theta))\uff0c\u5373\u62df\u5408\u8bef\u5dee\u548c\u5939\u89d2\u4f59\u5f26\u503c\u3002 Source code in omicron/talib/core.py def angle ( ts , threshold = 0.01 , loss_func = \"re\" ) -> Tuple [ float , float ]: \"\"\"\u6c42\u65f6\u95f4\u5e8f\u5217`ts`\u62df\u5408\u76f4\u7ebf\u76f8\u5bf9\u4e8e`x`\u8f74\u7684\u5939\u89d2\u7684\u4f59\u5f26\u503c \u672c\u51fd\u6570\u53ef\u4ee5\u7528\u6765\u5224\u65ad\u65f6\u95f4\u5e8f\u5217\u7684\u589e\u957f\u8d8b\u52bf\u3002\u5f53`angle`\u5904\u4e8e[-1, 0]\u65f6\uff0c\u8d8a\u9760\u8fd10\uff0c\u4e0b\u964d\u8d8a\u5feb\uff1b\u5f53`angle` \u5904\u4e8e[0, 1]\u65f6\uff0c\u8d8a\u63a5\u8fd10\uff0c\u4e0a\u5347\u8d8a\u5feb\u3002 \u5982\u679c`ts`\u65e0\u6cd5\u5f88\u597d\u5730\u62df\u5408\u4e3a\u76f4\u7ebf\uff0c\u5219\u8fd4\u56de[float, None] Examples: >>> ts = np.array([ i for i in range(5)]) >>> round(angle(ts)[1], 3) # degree: 45, rad: pi/2 0.707 >>> ts = np.array([ np.sqrt(3) / 3 * i for i in range(10)]) >>> round(angle(ts)[1],3) # degree: 30, rad: pi/6 0.866 >>> ts = np.array([ -np.sqrt(3) / 3 * i for i in range(7)]) >>> round(angle(ts)[1], 3) # degree: 150, rad: 5*pi/6 -0.866 Args: ts: Returns: \u8fd4\u56de (error, consine(theta))\uff0c\u5373\u62df\u5408\u8bef\u5dee\u548c\u5939\u89d2\u4f59\u5f26\u503c\u3002 \"\"\" err , ( a , b ) = polyfit ( ts , deg = 1 , loss_func = loss_func ) if err > threshold : return ( err , None ) v = np . array ([ 1 , a + b ]) vx = np . array ([ 1 , 0 ]) return err , copysign ( np . dot ( v , vx ) / ( norm ( v ) * norm ( vx )), a )","title":"angle()"},{"location":"api/talib/#omicron.talib.core.clustering","text":"\u5c06\u6570\u7ec4 numbers \u5212\u5206\u4e3a n \u4e2a\u7c07 \u8fd4\u56de\u503c\u4e3a\u4e00\u4e2aList, \u6bcf\u4e00\u4e2a\u5143\u7d20\u4e3a\u4e00\u4e2a\u5217\u8868\uff0c\u5206\u522b\u4e3a\u7c07\u7684\u8d77\u59cb\u70b9\u548c\u957f\u5ea6\u3002 Examples: >>> numbers = np . array ([ 1 , 1 , 1 , 2 , 4 , 6 , 8 , 7 , 4 , 5 , 6 ]) >>> clustering ( numbers , 2 ) [( 0 , 4 ), ( 4 , 7 )] Returns: Type Description List[Tuple[int, int]] \u5212\u5206\u540e\u7684\u7c07\u5217\u8868\u3002 Source code in omicron/talib/core.py def clustering ( numbers : np . ndarray , n : int ) -> List [ Tuple [ int , int ]]: \"\"\"\u5c06\u6570\u7ec4`numbers`\u5212\u5206\u4e3a`n`\u4e2a\u7c07 \u8fd4\u56de\u503c\u4e3a\u4e00\u4e2aList, \u6bcf\u4e00\u4e2a\u5143\u7d20\u4e3a\u4e00\u4e2a\u5217\u8868\uff0c\u5206\u522b\u4e3a\u7c07\u7684\u8d77\u59cb\u70b9\u548c\u957f\u5ea6\u3002 Examples: >>> numbers = np.array([1,1,1,2,4,6,8,7,4,5,6]) >>> clustering(numbers, 2) [(0, 4), (4, 7)] Returns: \u5212\u5206\u540e\u7684\u7c07\u5217\u8868\u3002 \"\"\" result = ckwrap . cksegs ( numbers , n ) clusters = [] for pos , size in zip ( result . centers , result . sizes ): clusters . append (( int ( pos - size // 2 - 1 ), int ( size ))) return clusters","title":"clustering()"},{"location":"api/talib/#omicron.talib.core.exp_moving_average","text":"Numpy implementation of EMA Source code in omicron/talib/core.py def exp_moving_average ( values , window ): \"\"\"Numpy implementation of EMA\"\"\" weights = np . exp ( np . linspace ( - 1.0 , 0.0 , window )) weights /= weights . sum () a = np . 
convolve ( values , weights , mode = \"full\" )[: len ( values )] a [: window ] = a [ window ] return a","title":"exp_moving_average()"},{"location":"api/talib/#omicron.talib.core.mean_absolute_error","text":"\u8fd4\u56de\u9884\u6d4b\u5e8f\u5217\u76f8\u5bf9\u4e8e\u771f\u503c\u5e8f\u5217\u7684\u5e73\u5747\u7edd\u5bf9\u503c\u5dee \u4e24\u4e2a\u5e8f\u5217\u5e94\u8be5\u5177\u6709\u76f8\u540c\u7684\u957f\u5ea6\u3002\u5982\u679c\u5b58\u5728nan\uff0c\u5219nan\u7684\u503c\u4e0d\u8ba1\u5165\u5e73\u5747\u503c\u3002 Examples: >>> y = np . arange ( 5 ) >>> y_hat = np . arange ( 5 ) >>> y_hat [ 4 ] = 0 >>> mean_absolute_error ( y , y ) 0.0 >>> mean_absolute_error ( y , y_hat ) 0.8 Parameters: Name Type Description Default y np.array \u771f\u503c\u5e8f\u5217 required y_hat \u6bd4\u8f83\u5e8f\u5217 required Returns: Type Description float \u5e73\u5747\u7edd\u5bf9\u503c\u5dee Source code in omicron/talib/core.py def mean_absolute_error ( y : np . array , y_hat : np . array ) -> float : \"\"\"\u8fd4\u56de\u9884\u6d4b\u5e8f\u5217\u76f8\u5bf9\u4e8e\u771f\u503c\u5e8f\u5217\u7684\u5e73\u5747\u7edd\u5bf9\u503c\u5dee \u4e24\u4e2a\u5e8f\u5217\u5e94\u8be5\u5177\u6709\u76f8\u540c\u7684\u957f\u5ea6\u3002\u5982\u679c\u5b58\u5728nan\uff0c\u5219nan\u7684\u503c\u4e0d\u8ba1\u5165\u5e73\u5747\u503c\u3002 Examples: >>> y = np.arange(5) >>> y_hat = np.arange(5) >>> y_hat[4] = 0 >>> mean_absolute_error(y, y) 0.0 >>> mean_absolute_error(y, y_hat) 0.8 Args: y (np.array): \u771f\u503c\u5e8f\u5217 y_hat: \u6bd4\u8f83\u5e8f\u5217 Returns: float: \u5e73\u5747\u7edd\u5bf9\u503c\u5dee \"\"\" return nanmean ( np . abs ( y - y_hat ))","title":"mean_absolute_error()"},{"location":"api/talib/#omicron.talib.core.moving_average","text":"\u751f\u6210ts\u5e8f\u5217\u7684\u79fb\u52a8\u5e73\u5747\u503c Examples: >>> ts = np . arange ( 7 ) >>> moving_average ( ts , 5 ) array ([ nan , nan , nan , nan , 2. , 3. , 4. ]) Parameters: Name Type Description Default ts Sequence the input array required win int the window size required padding if True, then the return will be equal length as input, padding with np.NaN at the beginning True Returns: Type Description ndarray The moving mean of the input array along the specified axis. The output has the same shape as the input. Source code in omicron/talib/core.py def moving_average ( ts : Sequence , win : int , padding = True ) -> np . ndarray : \"\"\"\u751f\u6210ts\u5e8f\u5217\u7684\u79fb\u52a8\u5e73\u5747\u503c Examples: >>> ts = np.arange(7) >>> moving_average(ts, 5) array([nan, nan, nan, nan, 2., 3., 4.]) Args: ts (Sequence): the input array win (int): the window size padding: if True, then the return will be equal length as input, padding with np.NaN at the beginning Returns: The moving mean of the input array along the specified axis. The output has the same shape as the input. 
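A short sketch contrasting `exp_moving_average` with `moving_average` (synthetic input): the EMA helper returns an array of the same length as its input with the first `window` entries set to the value at index `window`, while `moving_average` pads the head with NaN by default.

```python
import numpy as np
from omicron.talib.core import exp_moving_average, moving_average

values = np.arange(10, dtype=float)

ema = exp_moving_average(values, 5)
print(len(ema) == len(values))     # True: same length as the input
print(ema[:6])                     # the first 5 entries equal ema[5]

print(moving_average(values, 5))   # [nan, nan, nan, nan, 2., 3., 4., 5., 6., 7.]
```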
\"\"\" ma = move_mean ( ts , win ) if padding : return ma else : return ma [ win - 1 :]","title":"moving_average()"},{"location":"api/talib/#omicron.talib.core.normalize","text":"\u5bf9\u6570\u636e\u8fdb\u884c\u89c4\u8303\u5316\u5904\u7406\u3002 \u5982\u679cscaler\u4e3amaxabs\uff0c\u5219X\u7684\u5404\u5143\u7d20\u88ab\u538b\u7f29\u5230[-1,1]\u4e4b\u95f4 \u5982\u679cscaler\u4e3aunit_vector\uff0c\u5219\u5c06X\u7684\u5404\u5143\u7d20\u538b\u7f29\u5230\u5355\u4f4d\u8303\u6570 \u5982\u679cscaler\u4e3aminmax,\u5219X\u7684\u5404\u5143\u7d20\u88ab\u538b\u7f29\u5230[0,1]\u4e4b\u95f4 \u5982\u679cscaler\u4e3astandard,\u5219X\u7684\u5404\u5143\u7d20\u88ab\u538b\u7f29\u5230\u5355\u4f4d\u65b9\u5dee\u4e4b\u95f4\uff0c\u4e14\u5747\u503c\u4e3a\u96f6\u3002 \u53c2\u8003 sklearn Examples: >>> X = [[ 1. , - 1. , 2. ], ... [ 2. , 0. , 0. ], ... [ 0. , 1. , - 1. ]] >>> expected = [[ 0.4082 , - 0.4082 , 0.8165 ], ... [ 1. , 0. , 0. ], ... [ 0. , 0.7071 , - 0.7071 ]] >>> X_hat = normalize ( X , scaler = 'unit_vector' ) >>> np . testing . assert_array_almost_equal ( expected , X_hat , decimal = 4 ) >>> expected = [[ 0.5 , - 1. , 1. ], ... [ 1. , 0. , 0. ], ... [ 0. , 1. , - 0.5 ]] >>> X_hat = normalize ( X , scaler = 'maxabs' ) >>> np . testing . assert_array_almost_equal ( expected , X_hat , decimal = 2 ) >>> expected = [[ 0.5 , 0. , 1. ], ... [ 1. , 0.5 , 0.33333333 ], ... [ 0. , 1. , 0. ]] >>> X_hat = normalize ( X , scaler = 'minmax' ) >>> np . testing . assert_array_almost_equal ( expected , X_hat , decimal = 3 ) >>> X = [[ 0 , 0 ], ... [ 0 , 0 ], ... [ 1 , 1 ], ... [ 1 , 1 ]] >>> expected = [[ - 1. , - 1. ], ... [ - 1. , - 1. ], ... [ 1. , 1. ], ... [ 1. , 1. ]] >>> X_hat = normalize ( X , scaler = 'standard' ) >>> np . testing . assert_array_almost_equal ( expected , X_hat , decimal = 3 ) Parameters: Name Type Description Default X 2D array required scaler str [description]. Defaults to 'maxabs_scale'. 'maxabs' Source code in omicron/talib/core.py def normalize ( X , scaler = \"maxabs\" ): \"\"\"\u5bf9\u6570\u636e\u8fdb\u884c\u89c4\u8303\u5316\u5904\u7406\u3002 \u5982\u679cscaler\u4e3amaxabs\uff0c\u5219X\u7684\u5404\u5143\u7d20\u88ab\u538b\u7f29\u5230[-1,1]\u4e4b\u95f4 \u5982\u679cscaler\u4e3aunit_vector\uff0c\u5219\u5c06X\u7684\u5404\u5143\u7d20\u538b\u7f29\u5230\u5355\u4f4d\u8303\u6570 \u5982\u679cscaler\u4e3aminmax,\u5219X\u7684\u5404\u5143\u7d20\u88ab\u538b\u7f29\u5230[0,1]\u4e4b\u95f4 \u5982\u679cscaler\u4e3astandard,\u5219X\u7684\u5404\u5143\u7d20\u88ab\u538b\u7f29\u5230\u5355\u4f4d\u65b9\u5dee\u4e4b\u95f4\uff0c\u4e14\u5747\u503c\u4e3a\u96f6\u3002 \u53c2\u8003 [sklearn] [sklearn]: https://scikit-learn.org/stable/auto_examples/preprocessing/plot_all_scaling.html#results Examples: >>> X = [[ 1., -1., 2.], ... [ 2., 0., 0.], ... [ 0., 1., -1.]] >>> expected = [[ 0.4082, -0.4082, 0.8165], ... [ 1., 0., 0.], ... [ 0., 0.7071, -0.7071]] >>> X_hat = normalize(X, scaler='unit_vector') >>> np.testing.assert_array_almost_equal(expected, X_hat, decimal=4) >>> expected = [[0.5, -1., 1.], ... [1., 0., 0.], ... [0., 1., -0.5]] >>> X_hat = normalize(X, scaler='maxabs') >>> np.testing.assert_array_almost_equal(expected, X_hat, decimal = 2) >>> expected = [[0.5 , 0. , 1. ], ... [1. , 0.5 , 0.33333333], ... [0. , 1. , 0. ]] >>> X_hat = normalize(X, scaler='minmax') >>> np.testing.assert_array_almost_equal(expected, X_hat, decimal= 3) >>> X = [[0, 0], ... [0, 0], ... [1, 1], ... [1, 1]] >>> expected = [[-1., -1.], ... [-1., -1.], ... [ 1., 1.], ... 
[ 1., 1.]] >>> X_hat = normalize(X, scaler='standard') >>> np.testing.assert_array_almost_equal(expected, X_hat, decimal = 3) Args: X (2D array): scaler (str, optional): [description]. Defaults to 'maxabs_scale'. \"\"\" if scaler == \"maxabs\" : return MaxAbsScaler () . fit_transform ( X ) elif scaler == \"unit_vector\" : return sklearn . preprocessing . normalize ( X , norm = \"l2\" ) elif scaler == \"minmax\" : return minmax_scale ( X ) elif scaler == \"standard\" : return StandardScaler () . fit_transform ( X )","title":"normalize()"},{"location":"api/talib/#omicron.talib.core.pct_error","text":"\u76f8\u5bf9\u4e8e\u5e8f\u5217\u7b97\u672f\u5747\u503c\u7684\u8bef\u5dee\u503c Examples: >>> y = np . arange ( 5 ) >>> y_hat = np . arange ( 5 ) >>> y_hat [ 4 ] = 0 >>> pct_error ( y , y_hat ) 0.4 Parameters: Name Type Description Default y np.array [description] required y_hat np.array [description] required Returns: Type Description float [description] Source code in omicron/talib/core.py def pct_error ( y : np . array , y_hat : np . array ) -> float : \"\"\"\u76f8\u5bf9\u4e8e\u5e8f\u5217\u7b97\u672f\u5747\u503c\u7684\u8bef\u5dee\u503c Examples: >>> y = np.arange(5) >>> y_hat = np.arange(5) >>> y_hat[4] = 0 >>> pct_error(y, y_hat) 0.4 Args: y (np.array): [description] y_hat (np.array): [description] Returns: float: [description] \"\"\" mae = mean_absolute_error ( y , y_hat ) return mae / nanmean ( np . abs ( y ))","title":"pct_error()"},{"location":"api/talib/#omicron.talib.core.polyfit","text":"\u5bf9\u7ed9\u5b9a\u7684\u65f6\u95f4\u5e8f\u5217\u8fdb\u884c\u76f4\u7ebf/\u4e8c\u6b21\u66f2\u7ebf\u62df\u5408\u3002 \u4e8c\u6b21\u66f2\u7ebf\u53ef\u4ee5\u62df\u5408\u5230\u53cd\u751f\u53cd\u8f6c\u7684\u884c\u60c5\uff0c\u5982\u5706\u5f27\u5e95\u3001\u5706\u5f27\u9876\uff1b\u4e5f\u53ef\u4ee5\u62df\u5408\u5230\u4e0a\u8ff0\u8d8b\u52bf\u4e2d\u7684\u5355\u8fb9\u8d70\u52bf\uff0c\u5373\u5176\u4e2d\u4e00\u6bb5\u66f2\u7ebf\u3002\u5bf9\u4e8e\u5982\u957f\u671f\u5747\u7ebf\uff0c\u5728\u4e00\u6bb5\u65f6\u95f4\u5185\u8d70\u52bf\u53ef\u80fd\u5448\u73b0\u4e3a\u4e00\u6761\u76f4\u7ebf\uff0c\u6545\u4e5f\u53ef\u7528\u6b64\u51fd\u6570\u8fdb\u884c\u76f4\u7ebf\u62df\u5408\u3002 \u4e3a\u4fbf\u4e8e\u5728\u4e0d\u540c\u54c1\u79cd\u3001\u4e0d\u540c\u7684\u65f6\u95f4\u4e4b\u95f4\u5bf9\u8bef\u5dee\u3001\u7cfb\u6570\u8fdb\u884c\u6bd4\u8f83\uff0c\u8bf7\u4e8b\u5148\u5bf9ts\u8fdb\u884c\u5f52\u4e00\u5316\u3002 \u5982\u679c\u9047\u5230\u65e0\u6cd5\u62df\u5408\u7684\u60c5\u51b5\uff08\u5f02\u5e38\uff09\uff0c\u5c06\u8fd4\u56de\u4e00\u4e2a\u975e\u5e38\u5927\u7684\u8bef\u5dee\uff0c\u5e76\u5c06\u5176\u5b83\u9879\u7f6e\u4e3anp.nan Examples: >>> ts = [ i for i in range ( 5 )] >>> err , ( a , b ) = polyfit ( ts , deg = 1 ) >>> print ( round ( err , 3 ), round ( a , 1 )) 0.0 1.0 Parameters: Name Type Description Default ts Sequence \u5f85\u62df\u5408\u7684\u65f6\u95f4\u5e8f\u5217 required deg int \u5982\u679c\u8981\u8fdb\u884c\u76f4\u7ebf\u62df\u5408\uff0c\u53d61\uff1b\u4e8c\u6b21\u66f2\u7ebf\u62df\u5408\u53d62. 
Defaults to 2 2 loss_func str \u8bef\u5dee\u8ba1\u7b97\u65b9\u6cd5\uff0c\u53d6\u503c\u4e3a mae , rmse , mse \u6216 re \u3002Defaults to re (relative_error) 're' Returns: Type Description [Tuple] \u5982\u679c\u4e3a\u76f4\u7ebf\u62df\u5408\uff0c\u8fd4\u56de\u8bef\u5dee\uff0c(a,b)(\u4e00\u6b21\u9879\u7cfb\u6570\u548c\u5e38\u6570)\u3002\u5982\u679c\u4e3a\u4e8c\u6b21\u66f2\u7ebf\u62df\u5408\uff0c\u8fd4\u56de \u8bef\u5dee, (a,b,c)(\u4e8c\u6b21\u9879\u3001\u4e00\u6b21\u9879\u548c\u5e38\u91cf\uff09, (vert_x, vert_y)(\u9876\u70b9\u5904\u7684index\uff0c\u9876\u70b9\u503c) Source code in omicron/talib/core.py def polyfit ( ts : Sequence , deg : int = 2 , loss_func = \"re\" ) -> Tuple : \"\"\"\u5bf9\u7ed9\u5b9a\u7684\u65f6\u95f4\u5e8f\u5217\u8fdb\u884c\u76f4\u7ebf/\u4e8c\u6b21\u66f2\u7ebf\u62df\u5408\u3002 \u4e8c\u6b21\u66f2\u7ebf\u53ef\u4ee5\u62df\u5408\u5230\u53cd\u751f\u53cd\u8f6c\u7684\u884c\u60c5\uff0c\u5982\u5706\u5f27\u5e95\u3001\u5706\u5f27\u9876\uff1b\u4e5f\u53ef\u4ee5\u62df\u5408\u5230\u4e0a\u8ff0\u8d8b\u52bf\u4e2d\u7684\u5355\u8fb9\u8d70\u52bf\uff0c\u5373\u5176\u4e2d\u4e00\u6bb5\u66f2\u7ebf\u3002\u5bf9\u4e8e\u5982\u957f\u671f\u5747\u7ebf\uff0c\u5728\u4e00\u6bb5\u65f6\u95f4\u5185\u8d70\u52bf\u53ef\u80fd\u5448\u73b0\u4e3a\u4e00\u6761\u76f4\u7ebf\uff0c\u6545\u4e5f\u53ef\u7528\u6b64\u51fd\u6570\u8fdb\u884c\u76f4\u7ebf\u62df\u5408\u3002 \u4e3a\u4fbf\u4e8e\u5728\u4e0d\u540c\u54c1\u79cd\u3001\u4e0d\u540c\u7684\u65f6\u95f4\u4e4b\u95f4\u5bf9\u8bef\u5dee\u3001\u7cfb\u6570\u8fdb\u884c\u6bd4\u8f83\uff0c\u8bf7\u4e8b\u5148\u5bf9ts\u8fdb\u884c\u5f52\u4e00\u5316\u3002 \u5982\u679c\u9047\u5230\u65e0\u6cd5\u62df\u5408\u7684\u60c5\u51b5\uff08\u5f02\u5e38\uff09\uff0c\u5c06\u8fd4\u56de\u4e00\u4e2a\u975e\u5e38\u5927\u7684\u8bef\u5dee\uff0c\u5e76\u5c06\u5176\u5b83\u9879\u7f6e\u4e3anp.nan Examples: >>> ts = [i for i in range(5)] >>> err, (a, b) = polyfit(ts, deg=1) >>> print(round(err, 3), round(a, 1)) 0.0 1.0 Args: ts (Sequence): \u5f85\u62df\u5408\u7684\u65f6\u95f4\u5e8f\u5217 deg (int): \u5982\u679c\u8981\u8fdb\u884c\u76f4\u7ebf\u62df\u5408\uff0c\u53d61\uff1b\u4e8c\u6b21\u66f2\u7ebf\u62df\u5408\u53d62. Defaults to 2 loss_func (str): \u8bef\u5dee\u8ba1\u7b97\u65b9\u6cd5\uff0c\u53d6\u503c\u4e3a`mae`, `rmse`,`mse` \u6216`re`\u3002Defaults to `re` (relative_error) Returns: [Tuple]: \u5982\u679c\u4e3a\u76f4\u7ebf\u62df\u5408\uff0c\u8fd4\u56de\u8bef\u5dee\uff0c(a,b)(\u4e00\u6b21\u9879\u7cfb\u6570\u548c\u5e38\u6570)\u3002\u5982\u679c\u4e3a\u4e8c\u6b21\u66f2\u7ebf\u62df\u5408\uff0c\u8fd4\u56de \u8bef\u5dee, (a,b,c)(\u4e8c\u6b21\u9879\u3001\u4e00\u6b21\u9879\u548c\u5e38\u91cf\uff09, (vert_x, vert_y)(\u9876\u70b9\u5904\u7684index\uff0c\u9876\u70b9\u503c) \"\"\" if deg not in ( 1 , 2 ): raise ValueError ( \"deg must be 1 or 2\" ) try : if any ( np . isnan ( ts )): raise ValueError ( \"ts contains nan\" ) x = np . array ( list ( range ( len ( ts )))) z = np . polyfit ( x , ts , deg = deg ) p = np . poly1d ( z ) ts_hat = np . array ([ p ( xi ) for xi in x ]) if loss_func == \"mse\" : error = np . mean ( np . square ( ts - ts_hat )) elif loss_func == \"rmse\" : error = np . sqrt ( np . mean ( np . 
square ( ts - ts_hat ))) elif loss_func == \"mae\" : error = mean_absolute_error ( ts , ts_hat ) else : # defaults to relative error error = pct_error ( ts , ts_hat ) if deg == 2 : a , b , c = z [ 0 ], z [ 1 ], z [ 2 ] axis_x = - b / ( 2 * a ) if a != 0 : axis_y = ( 4 * a * c - b * b ) / ( 4 * a ) else : axis_y = None return error , z , ( axis_x , axis_y ) elif deg == 1 : return error , z except Exception : error = 1e9 if deg == 1 : return error , ( np . nan , np . nan ) else : return error , ( np . nan , np . nan , np . nan ), ( np . nan , np . nan )","title":"polyfit()"},{"location":"api/talib/#omicron.talib.core.slope","text":"\u6c42ts\u8868\u793a\u7684\u76f4\u7ebf\uff08\u5982\u679c\u80fd\u62df\u5408\u6210\u76f4\u7ebf\u7684\u8bdd\uff09\u7684\u659c\u7387 Parameters: Name Type Description Default ts np.array [description] required loss_func str [description]. Defaults to 're'. 're' Source code in omicron/talib/core.py def slope ( ts : np . array , loss_func = \"re\" ): \"\"\"\u6c42ts\u8868\u793a\u7684\u76f4\u7ebf\uff08\u5982\u679c\u80fd\u62df\u5408\u6210\u76f4\u7ebf\u7684\u8bdd\uff09\u7684\u659c\u7387 Args: ts (np.array): [description] loss_func (str, optional): [description]. Defaults to 're'. \"\"\" err , ( a , b ) = polyfit ( ts , deg = 1 , loss_func = loss_func ) return err , a","title":"slope()"},{"location":"api/talib/#omicron.talib.core.smooth","text":"\u5e73\u6ed1\u5e8f\u5217ts\uff0c\u4f7f\u7528\u7a97\u53e3\u5927\u5c0f\u4e3awin\u7684\u5e73\u6ed1\u6a21\u578b\uff0c\u9ed8\u8ba4\u4f7f\u7528\u7ebf\u6027\u6a21\u578b \u63d0\u4f9b\u672c\u51fd\u6570\u4e3b\u8981\u57fa\u4e8e\u8fd9\u6837\u7684\u8003\u8651\uff1a omicron\u7684\u4f7f\u7528\u8005\u53ef\u80fd\u5e76\u4e0d\u719f\u6089\u4fe1\u53f7\u5904\u7406\u7684\u6982\u5ff5\uff0c\u8fd9\u91cc\u76f8\u5f53\u4e8e\u63d0\u4f9b\u4e86\u76f8\u5173\u529f\u80fd\u7684\u4e00\u4e2a\u5165\u53e3\u3002 Parameters: Name Type Description Default ts np.array [description] required win int [description] required poly_order int [description]. Defaults to 1. 1 Source code in omicron/talib/core.py def smooth ( ts : np . array , win : int , poly_order = 1 , mode = \"interp\" ): \"\"\"\u5e73\u6ed1\u5e8f\u5217ts\uff0c\u4f7f\u7528\u7a97\u53e3\u5927\u5c0f\u4e3awin\u7684\u5e73\u6ed1\u6a21\u578b\uff0c\u9ed8\u8ba4\u4f7f\u7528\u7ebf\u6027\u6a21\u578b \u63d0\u4f9b\u672c\u51fd\u6570\u4e3b\u8981\u57fa\u4e8e\u8fd9\u6837\u7684\u8003\u8651\uff1a omicron\u7684\u4f7f\u7528\u8005\u53ef\u80fd\u5e76\u4e0d\u719f\u6089\u4fe1\u53f7\u5904\u7406\u7684\u6982\u5ff5\uff0c\u8fd9\u91cc\u76f8\u5f53\u4e8e\u63d0\u4f9b\u4e86\u76f8\u5173\u529f\u80fd\u7684\u4e00\u4e2a\u5165\u53e3\u3002 Args: ts (np.array): [description] win (int): [description] poly_order (int, optional): [description]. Defaults to 1. \"\"\" return savgol_filter ( ts , win , poly_order , mode = mode )","title":"smooth()"},{"location":"api/talib/#omicron.talib.core.weighted_moving_average","text":"\u8ba1\u7b97\u52a0\u6743\u79fb\u52a8\u5e73\u5747 Parameters: Name Type Description Default ts np.array [description] required win int [description] required Returns: Type Description np.array [description] Source code in omicron/talib/core.py def weighted_moving_average ( ts : np . array , win : int ) -> np . array : \"\"\"\u8ba1\u7b97\u52a0\u6743\u79fb\u52a8\u5e73\u5747 Args: ts (np.array): [description] win (int): [description] Returns: np.array: [description] \"\"\" w = [ 2 * ( i + 1 ) / ( win * ( win + 1 )) for i in range ( win )] return np . 
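`slope` is a thin wrapper over `polyfit(..., deg=1)`; a quick sketch on a perfectly linear toy series shows the returned pair of fit error and slope.

```python
import numpy as np
from omicron.talib.core import slope

ts = np.array([2.0 * i + 3.0 for i in range(10)])   # an exact straight line
err, a = slope(ts)
print(round(err, 3), round(a, 1))   # 0.0 2.0 (near-zero error, slope of the fitted line is 2.0)
```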
convolve ( ts , w , \"valid\" )","title":"weighted_moving_average()"},{"location":"api/talib/#omicron.talib.morph","text":"\u5f62\u6001\u68c0\u6d4b\u76f8\u5173\u65b9\u6cd5","title":"morph"},{"location":"api/talib/#omicron.talib.morph.BreakoutFlag","text":"An enumeration. Source code in omicron/talib/morph.py class BreakoutFlag ( IntEnum ): UP = 1 DOWN = - 1 NONE = 0","title":"BreakoutFlag"},{"location":"api/talib/#omicron.talib.morph.CrossFlag","text":"An enumeration. Source code in omicron/talib/morph.py class CrossFlag ( IntEnum ): UPCROSS = 1 DOWNCROSS = - 1 NONE = 0","title":"CrossFlag"},{"location":"api/talib/#omicron.talib.morph.breakout","text":"\u68c0\u6d4b\u65f6\u95f4\u5e8f\u5217\u662f\u5426\u7a81\u7834\u4e86\u538b\u529b\u7ebf\uff08\u6574\u7406\u7ebf\uff09 Parameters: Name Type Description Default ts np.ndarray \u65f6\u95f4\u5e8f\u5217 required upthres float \u8bf7\u53c2\u8003 peaks_and_valleys 0.01 downthres float \u8bf7\u53c2\u8003 peaks_and_valleys -0.01 confirm int \u7ecf\u8fc7\u591a\u5c11\u4e2abars\u540e\uff0c\u624d\u786e\u8ba4\u7a81\u7834\u3002\u9ed8\u8ba4\u4e3a1 1 Returns: Type Description BreakoutFlag \u5982\u679c\u4e0a\u5411\u7a81\u7834\u538b\u529b\u7ebf\uff0c\u8fd4\u56de1\uff0c\u5982\u679c\u5411\u4e0b\u7a81\u7834\u538b\u529b\u7ebf\uff0c\u8fd4\u56de-1\uff0c\u5426\u5219\u8fd4\u56de0 Source code in omicron/talib/morph.py def breakout ( ts : np . ndarray , upthres : float = 0.01 , downthres : float = - 0.01 , confirm : int = 1 ) -> BreakoutFlag : \"\"\"\u68c0\u6d4b\u65f6\u95f4\u5e8f\u5217\u662f\u5426\u7a81\u7834\u4e86\u538b\u529b\u7ebf\uff08\u6574\u7406\u7ebf\uff09 Args: ts (np.ndarray): \u65f6\u95f4\u5e8f\u5217 upthres (float, optional): \u8bf7\u53c2\u8003[peaks_and_valleys][omicron.talib.morph.peaks_and_valleys] downthres (float, optional): \u8bf7\u53c2\u8003[peaks_and_valleys][omicron.talib.morph.peaks_and_valleys] confirm (int, optional): \u7ecf\u8fc7\u591a\u5c11\u4e2abars\u540e\uff0c\u624d\u786e\u8ba4\u7a81\u7834\u3002\u9ed8\u8ba4\u4e3a1 Returns: \u5982\u679c\u4e0a\u5411\u7a81\u7834\u538b\u529b\u7ebf\uff0c\u8fd4\u56de1\uff0c\u5982\u679c\u5411\u4e0b\u7a81\u7834\u538b\u529b\u7ebf\uff0c\u8fd4\u56de-1\uff0c\u5426\u5219\u8fd4\u56de0 \"\"\" support , resist , _ = support_resist_lines ( ts [: - confirm ], upthres , downthres ) x0 = len ( ts ) - confirm - 1 x = list ( range ( len ( ts ) - confirm , len ( ts ))) if resist is not None : if np . all ( ts [ x ] > resist ( x )) and ts [ x0 ] <= resist ( x0 ): return BreakoutFlag . UP if support is not None : if np . all ( ts [ x ] < support ( x )) and ts [ x0 ] >= support ( x0 ): return BreakoutFlag . DOWN return BreakoutFlag . NONE","title":"breakout()"},{"location":"api/talib/#omicron.talib.morph.cross","text":"\u5224\u65ad\u5e8f\u5217f\u662f\u5426\u4e0eg\u76f8\u4ea4\u3002\u5982\u679c\u4e24\u4e2a\u5e8f\u5217\u6709\u4e14\u4ec5\u6709\u4e00\u4e2a\u4ea4\u70b9\uff0c\u5219\u8fd4\u56de1\u8868\u660ef\u4e0a\u4ea4g\uff1b-1\u8868\u660ef\u4e0b\u4ea4g \u672c\u65b9\u6cd5\u53ef\u7528\u4ee5\u5224\u65ad\u4e24\u6761\u5747\u7ebf\u662f\u5426\u76f8\u4ea4\u3002 Returns: Type Description CrossFlag (flag, index), \u5176\u4e2dflag\u53d6\u503c\u4e3a\uff1a 0 \u65e0\u6548 -1 f\u5411\u4e0b\u4ea4\u53c9g 1 f\u5411\u4e0a\u4ea4\u53c9g Source code in omicron/talib/morph.py def cross ( f : np . ndarray , g : np . 
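A sketch of `breakout` on synthetic data: a random-walk consolidation with two extra bars pushed above the recent range. Whether the call reports UP, DOWN or NONE depends on where the fitted support and resist lines happen to sit, so the example only demonstrates the calling convention.

```python
import numpy as np
from omicron.talib.morph import breakout, BreakoutFlag

np.random.seed(1978)
base = np.cumprod(1 + np.random.randn(90) * 0.01)                 # consolidation
ts = np.concatenate([base, base[-1] * np.array([1.02, 1.04])])    # two bars pushing higher

# confirm=2: the last two bars must all sit beyond the line before the break is confirmed
flag = breakout(ts, upthres=0.01, downthres=-0.01, confirm=2)
print(flag, flag == BreakoutFlag.UP)
```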
ndarray ) -> CrossFlag : \"\"\"\u5224\u65ad\u5e8f\u5217f\u662f\u5426\u4e0eg\u76f8\u4ea4\u3002\u5982\u679c\u4e24\u4e2a\u5e8f\u5217\u6709\u4e14\u4ec5\u6709\u4e00\u4e2a\u4ea4\u70b9\uff0c\u5219\u8fd4\u56de1\u8868\u660ef\u4e0a\u4ea4g\uff1b-1\u8868\u660ef\u4e0b\u4ea4g \u672c\u65b9\u6cd5\u53ef\u7528\u4ee5\u5224\u65ad\u4e24\u6761\u5747\u7ebf\u662f\u5426\u76f8\u4ea4\u3002 returns: (flag, index), \u5176\u4e2dflag\u53d6\u503c\u4e3a\uff1a 0 \u65e0\u6548 -1 f\u5411\u4e0b\u4ea4\u53c9g 1 f\u5411\u4e0a\u4ea4\u53c9g \"\"\" indices = np . argwhere ( np . diff ( np . sign ( f - g ))) . flatten () if len ( indices ) == 0 : return CrossFlag . NONE , 0 # \u5982\u679c\u5b58\u5728\u4e00\u4e2a\u6216\u8005\u591a\u4e2a\u4ea4\u70b9\uff0c\u53d6\u6700\u540e\u4e00\u4e2a idx = indices [ - 1 ] if f [ idx ] < g [ idx ]: return CrossFlag . UPCROSS , idx elif f [ idx ] > g [ idx ]: return CrossFlag . DOWNCROSS , idx else : return CrossFlag ( np . sign ( g [ idx - 1 ] - f [ idx - 1 ])), idx","title":"cross()"},{"location":"api/talib/#omicron.talib.morph.energy_hump","text":"\u68c0\u6d4b bars \u4e2d\u662f\u5426\u5b58\u5728\u4e24\u6ce2\u4ee5\u4e0a\u91cf\u80fd\u5267\u70c8\u589e\u52a0\u7684\u60c5\u5f62\uff08\u80fd\u91cf\u9a7c\u5cf0\uff09\uff0c\u8fd4\u56de\u6700\u540e\u4e00\u6ce2\u8ddd\u73b0\u5728\u7684\u4f4d\u7f6e\u53ca\u533a\u95f4\u957f\u5ea6\u3002 \u6ce8\u610f\u5982\u679c\u6700\u540e\u4e00\u4e2a\u80fd\u91cf\u9a7c\u5cf0\u8ddd\u73b0\u5728\u8fc7\u8fdc\uff08\u6bd4\u5982\u8d85\u8fc710\u4e2abar),\u53ef\u80fd\u610f\u5473\u7740\u8d44\u91d1\u5df2\u7ecf\u9003\u79bb\uff0c\u80fd\u91cf\u5df2\u7ecf\u8017\u5c3d\u3002 Parameters: Name Type Description Default bars [('frame', ' Optional [ Tuple [ int , int ]]: \"\"\"\u68c0\u6d4b`bars`\u4e2d\u662f\u5426\u5b58\u5728\u4e24\u6ce2\u4ee5\u4e0a\u91cf\u80fd\u5267\u70c8\u589e\u52a0\u7684\u60c5\u5f62\uff08\u80fd\u91cf\u9a7c\u5cf0\uff09\uff0c\u8fd4\u56de\u6700\u540e\u4e00\u6ce2\u8ddd\u73b0\u5728\u7684\u4f4d\u7f6e\u53ca\u533a\u95f4\u957f\u5ea6\u3002 \u6ce8\u610f\u5982\u679c\u6700\u540e\u4e00\u4e2a\u80fd\u91cf\u9a7c\u5cf0\u8ddd\u73b0\u5728\u8fc7\u8fdc\uff08\u6bd4\u5982\u8d85\u8fc710\u4e2abar),\u53ef\u80fd\u610f\u5473\u7740\u8d44\u91d1\u5df2\u7ecf\u9003\u79bb\uff0c\u80fd\u91cf\u5df2\u7ecf\u8017\u5c3d\u3002 Args: bars: \u884c\u60c5\u6570\u636e thresh: \u6700\u540e\u4e00\u6ce2\u91cf\u5fc5\u987b\u5927\u4e8e20\u5929\u5747\u91cf\u7684\u500d\u6570\u3002 Returns: \u5982\u679c\u4e0d\u5b58\u5728\u80fd\u91cf\u9a7c\u5cf0\u7684\u60c5\u5f62\uff0c\u5219\u8fd4\u56deNone\uff0c\u5426\u5219\u8fd4\u56de\u6700\u540e\u4e00\u4e2a\u9a7c\u5cf0\u79bb\u73b0\u5728\u7684\u8ddd\u79bb\u53ca\u533a\u95f4\u957f\u5ea6\u3002 \"\"\" vol = bars [ \"volume\" ] std = np . std ( vol [ 1 :] / vol [: - 1 ]) pvs = peak_valley_pivots ( vol , std , 0 ) frames = bars [ \"frame\" ] pvs [ 0 ] = 0 pvs [ - 1 ] = - 1 peaks = np . argwhere ( pvs == 1 ) mn = np . mean ( vol [ peaks ]) # \u9876\u70b9\u4e0d\u80fd\u7f29\u91cf\u5230\u5c16\u5cf0\u5747\u503c\u4ee5\u4e0b real_peaks = np . intersect1d ( np . argwhere ( vol > mn ), peaks ) if len ( real_peaks ) < 2 : return None logger . debug ( \"found %s peaks at %s \" , len ( real_peaks ), frames [ real_peaks ]) lp = real_peaks [ - 1 ] ma = moving_average ( vol , 20 )[ lp ] if vol [ lp ] < ma * thresh : logger . 
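A sketch of `cross` with two toy sequences that intersect exactly once; the helper returns the cross direction together with the index of the bar just before the crossing.

```python
import numpy as np
from omicron.talib.morph import cross, CrossFlag

f = np.arange(10, dtype=float)   # e.g. a fast moving average
g = np.full(10, 4.5)             # e.g. a slow moving average

flag, idx = cross(f, g)
print(flag == CrossFlag.UPCROSS, idx)   # True 4 (f moves above g between index 4 and 5)
```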
debug ( \"vol of last peak[ %s ] is less than mean_vol(20) * thresh[ %s ]\" , vol [ lp ], ma * thresh , ) return None return len ( bars ) - real_peaks [ - 1 ], real_peaks [ - 1 ] - real_peaks [ 0 ]","title":"energy_hump()"},{"location":"api/talib/#omicron.talib.morph.inverse_vcross","text":"\u5224\u65ad\u5e8f\u5217f\u662f\u5426\u4e0e\u5e8f\u5217g\u5b58\u5728^\u578b\u76f8\u4ea4\u3002\u5373\u5b58\u5728\u4e24\u4e2a\u4ea4\u70b9\uff0c\u7b2c\u4e00\u4e2a\u4ea4\u70b9\u4e3a\u5411\u4e0a\u76f8\u4ea4\uff0c\u7b2c\u4e8c\u4e2a\u4ea4\u70b9\u4e3a\u5411\u4e0b \u76f8\u4ea4\u3002\u53ef\u7528\u4e8e\u5224\u65ad\u89c1\u9876\u7279\u5f81\u7b49\u573a\u5408\u3002 Parameters: Name Type Description Default f np.array [description] required g np.array [description] required Returns: Type Description Tuple [description] Source code in omicron/talib/morph.py def inverse_vcross ( f : np . array , g : np . array ) -> Tuple : \"\"\"\u5224\u65ad\u5e8f\u5217f\u662f\u5426\u4e0e\u5e8f\u5217g\u5b58\u5728^\u578b\u76f8\u4ea4\u3002\u5373\u5b58\u5728\u4e24\u4e2a\u4ea4\u70b9\uff0c\u7b2c\u4e00\u4e2a\u4ea4\u70b9\u4e3a\u5411\u4e0a\u76f8\u4ea4\uff0c\u7b2c\u4e8c\u4e2a\u4ea4\u70b9\u4e3a\u5411\u4e0b \u76f8\u4ea4\u3002\u53ef\u7528\u4e8e\u5224\u65ad\u89c1\u9876\u7279\u5f81\u7b49\u573a\u5408\u3002 Args: f (np.array): [description] g (np.array): [description] Returns: Tuple: [description] \"\"\" indices = np . argwhere ( np . diff ( np . sign ( f - g ))) . flatten () if len ( indices ) == 2 : idx0 , idx1 = indices if f [ idx0 ] < g [ idx0 ] and f [ idx1 ] > g [ idx1 ]: return True , ( idx0 , idx1 ) return False , ( None , None )","title":"inverse_vcross()"},{"location":"api/talib/#omicron.talib.morph.peaks_and_valleys","text":"\u5bfb\u627ets\u4e2d\u7684\u6ce2\u5cf0\u548c\u6ce2\u8c37\uff0c\u8fd4\u56de\u6570\u7ec4\u6307\u793a\u5728\u8be5\u4f4d\u7f6e\u4e0a\u662f\u5426\u4e3a\u6ce2\u5cf0\u6216\u6ce2\u8c37\u3002\u5982\u679c\u4e3a1\uff0c\u5219\u4e3a\u6ce2\u5cf0\uff1b\u5982\u679c\u4e3a-1\uff0c\u5219\u4e3a\u6ce2\u8c37\u3002 \u672c\u51fd\u6570\u76f4\u63a5\u4f7f\u7528\u4e86zigzag\u4e2d\u7684peak_valley_pivots. \u6709\u5f88\u591a\u65b9\u6cd5\u53ef\u4ee5\u5b9e\u73b0\u672c\u529f\u80fd\uff0c\u6bd4\u5982scipy.signals.find_peaks_cwt, peak_valley_pivots\u7b49\u3002\u672c\u51fd\u6570\u66f4\u9002\u5408\u91d1\u878d\u65f6\u95f4\u5e8f\u5217\uff0c\u5e76\u4e14\u4f7f\u7528\u4e86cython\u52a0\u901f\u3002 Parameters: Name Type Description Default ts np.ndarray \u65f6\u95f4\u5e8f\u5217 required up_thresh float \u6ce2\u5cf0\u7684\u9608\u503c\uff0c\u5982\u679c\u4e3aNone,\u5219\u4f7f\u7528ts\u53d8\u5316\u7387\u7684\u4e8c\u500d\u6807\u51c6\u5dee None down_thresh float \u6ce2\u8c37\u7684\u9608\u503c\uff0c\u5982\u679c\u4e3aNone,\u5219\u4f7f\u7528ts\u53d8\u5316\u7387\u7684\u4e8c\u500d\u6807\u51c6\u5dee\u4e58\u4ee5-1 None Returns: Type Description np.ndarray \u8fd4\u56de\u6570\u7ec4\u6307\u793a\u5728\u8be5\u4f4d\u7f6e\u4e0a\u662f\u5426\u4e3a\u6ce2\u5cf0\u6216\u6ce2\u8c37\u3002 Source code in omicron/talib/morph.py def peaks_and_valleys ( ts : np . ndarray , up_thresh : Optional [ float ] = None , down_thresh : Optional [ float ] = None , ) -> np . ndarray : \"\"\"\u5bfb\u627ets\u4e2d\u7684\u6ce2\u5cf0\u548c\u6ce2\u8c37\uff0c\u8fd4\u56de\u6570\u7ec4\u6307\u793a\u5728\u8be5\u4f4d\u7f6e\u4e0a\u662f\u5426\u4e3a\u6ce2\u5cf0\u6216\u6ce2\u8c37\u3002\u5982\u679c\u4e3a1\uff0c\u5219\u4e3a\u6ce2\u5cf0\uff1b\u5982\u679c\u4e3a-1\uff0c\u5219\u4e3a\u6ce2\u8c37\u3002 \u672c\u51fd\u6570\u76f4\u63a5\u4f7f\u7528\u4e86zigzag\u4e2d\u7684peak_valley_pivots. 
\u6709\u5f88\u591a\u65b9\u6cd5\u53ef\u4ee5\u5b9e\u73b0\u672c\u529f\u80fd\uff0c\u6bd4\u5982scipy.signals.find_peaks_cwt, peak_valley_pivots\u7b49\u3002\u672c\u51fd\u6570\u66f4\u9002\u5408\u91d1\u878d\u65f6\u95f4\u5e8f\u5217\uff0c\u5e76\u4e14\u4f7f\u7528\u4e86cython\u52a0\u901f\u3002 Args: ts (np.ndarray): \u65f6\u95f4\u5e8f\u5217 up_thresh (float): \u6ce2\u5cf0\u7684\u9608\u503c\uff0c\u5982\u679c\u4e3aNone,\u5219\u4f7f\u7528ts\u53d8\u5316\u7387\u7684\u4e8c\u500d\u6807\u51c6\u5dee down_thresh (float): \u6ce2\u8c37\u7684\u9608\u503c\uff0c\u5982\u679c\u4e3aNone,\u5219\u4f7f\u7528ts\u53d8\u5316\u7387\u7684\u4e8c\u500d\u6807\u51c6\u5dee\u4e58\u4ee5-1 Returns: np.ndarray: \u8fd4\u56de\u6570\u7ec4\u6307\u793a\u5728\u8be5\u4f4d\u7f6e\u4e0a\u662f\u5426\u4e3a\u6ce2\u5cf0\u6216\u6ce2\u8c37\u3002 \"\"\" if ts . dtype != np . float64 : ts = ts . astype ( np . float64 ) if any ([ up_thresh is None , down_thresh is None ]): change_rate = ts [ 1 :] / ts [: - 1 ] - 1 std = np . std ( change_rate ) up_thresh = up_thresh or 2 * std down_thresh = down_thresh or - 2 * std return peak_valley_pivots ( ts , up_thresh , down_thresh )","title":"peaks_and_valleys()"},{"location":"api/talib/#omicron.talib.morph.plateaus","text":"\u7edf\u8ba1\u6570\u7ec4 numbers \u4e2d\u7684\u53ef\u80fd\u5b58\u5728\u7684\u5e73\u53f0\u6574\u7406\u3002 \u5982\u679c\u4e00\u4e2a\u6570\u7ec4\u4e2d\u5b58\u5728\u7740\u5b50\u6570\u7ec4\uff0c\u4f7f\u5f97\u5176\u5143\u7d20\u4e0e\u5747\u503c\u7684\u8ddd\u79bb\u843d\u5728\u4e09\u4e2a\u6807\u51c6\u5dee\u4ee5\u5185\u7684\u6bd4\u4f8b\u8d85\u8fc7 fall_in_range_ratio \u7684\uff0c\u5219\u8ba4\u4e3a\u8be5\u5b50\u6570\u7ec4\u6ee1\u8db3\u5e73\u53f0\u6574\u7406\u3002 Parameters: Name Type Description Default numbers ndarray \u8f93\u5165\u6570\u7ec4 required min_size int \u5e73\u53f0\u7684\u6700\u5c0f\u957f\u5ea6 required fall_in_range_ratio float \u8d85\u8fc7 fall_in_range_ratio \u6bd4\u4f8b\u7684\u5143\u7d20\u843d\u5728\u5747\u503c\u7684\u4e09\u4e2a\u6807\u51c6\u5dee\u4ee5\u5185\uff0c\u5c31\u8ba4\u4e3a\u8be5\u5b50\u6570\u7ec4\u6784\u6210\u4e00\u4e2a\u5e73\u53f0 0.97 Returns: Type Description List[Tuple] \u5e73\u53f0\u7684\u8d77\u59cb\u4f4d\u7f6e\u548c\u957f\u5ea6\u7684\u6570\u7ec4 Source code in omicron/talib/morph.py def plateaus ( numbers : np . ndarray , min_size : int , fall_in_range_ratio : float = 0.97 ) -> List [ Tuple ]: \"\"\"\u7edf\u8ba1\u6570\u7ec4`numbers`\u4e2d\u7684\u53ef\u80fd\u5b58\u5728\u7684\u5e73\u53f0\u6574\u7406\u3002 \u5982\u679c\u4e00\u4e2a\u6570\u7ec4\u4e2d\u5b58\u5728\u7740\u5b50\u6570\u7ec4\uff0c\u4f7f\u5f97\u5176\u5143\u7d20\u4e0e\u5747\u503c\u7684\u8ddd\u79bb\u843d\u5728\u4e09\u4e2a\u6807\u51c6\u5dee\u4ee5\u5185\u7684\u6bd4\u4f8b\u8d85\u8fc7`fall_in_range_ratio`\u7684\uff0c\u5219\u8ba4\u4e3a\u8be5\u5b50\u6570\u7ec4\u6ee1\u8db3\u5e73\u53f0\u6574\u7406\u3002 Args: numbers: \u8f93\u5165\u6570\u7ec4 min_size: \u5e73\u53f0\u7684\u6700\u5c0f\u957f\u5ea6 fall_in_range_ratio: \u8d85\u8fc7`fall_in_range_ratio`\u6bd4\u4f8b\u7684\u5143\u7d20\u843d\u5728\u5747\u503c\u7684\u4e09\u4e2a\u6807\u51c6\u5dee\u4ee5\u5185\uff0c\u5c31\u8ba4\u4e3a\u8be5\u5b50\u6570\u7ec4\u6784\u6210\u4e00\u4e2a\u5e73\u53f0 Returns: \u5e73\u53f0\u7684\u8d77\u59cb\u4f4d\u7f6e\u548c\u957f\u5ea6\u7684\u6570\u7ec4 \"\"\" if numbers . size <= min_size : n = 1 else : n = numbers . size // min_size clusters = clustering ( numbers , n ) plats = [] for ( start , length ) in clusters : if length < min_size : continue y = numbers [ start : start + length ] mean = np . mean ( y ) std = np . std ( y ) inrange = len ( y [ np . 
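A sketch of `peaks_and_valleys`, documented above, on a seeded random walk; with both thresholds left at None the function derives plus/minus two standard deviations of the change rate.

```python
import numpy as np
from omicron.talib.morph import peaks_and_valleys

np.random.seed(1978)
ts = np.cumprod(1 + np.random.randn(120) * 0.01)   # synthetic price path

flags = peaks_and_valleys(ts)                      # 1 = peak, -1 = valley, 0 = neither
print("peaks:", np.argwhere(flags == 1).flatten())
print("valleys:", np.argwhere(flags == -1).flatten())
```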
abs ( y - mean ) < 3 * std ]) ratio = inrange / length if ratio >= fall_in_range_ratio : plats . append (( start , length )) return plats","title":"plateaus()"},{"location":"api/talib/#omicron.talib.morph.rsi_bottom_distance","text":"\u6839\u636e\u7ed9\u5b9a\u7684\u6536\u76d8\u4ef7\uff0c\u8ba1\u7b97\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5230\u4e0a\u4e00\u4e2a\u53d1\u51farsi\u4f4e\u6c34\u5e73\u7684\u8ddd\u79bb\uff0c \u5982\u679c\u4ece\u4e0a\u4e00\u4e2a\u6700\u4f4e\u70b9rsi\u5230\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5e76\u672a\u53d1\u51fa\u4f4e\u6c34\u5e73\u4fe1\u53f7\uff0c \u8fd4\u56de\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5230\u4e0a\u4e00\u4e2a\u53d1\u51fa\u6700\u4f4e\u70b9rsi\u7684\u8ddd\u79bb\u3002 \u5176\u4e2dclose\u7684\u957f\u5ea6\u4e00\u822c\u4e0d\u5c0f\u4e8e60\u3002 \u8fd4\u56de\u503c\u4e3a\u8ddd\u79bb\u6574\u6570\uff0c\u4e0d\u6ee1\u8db3\u6761\u4ef6\u5219\u8fd4\u56deNone\u3002 Parameters: Name Type Description Default close np.array \u5177\u6709\u65f6\u95f4\u5e8f\u5217\u7684\u6536\u76d8\u4ef7 required thresh Tuple[float, float]) None\u9002\u7528\u6240\u6709\u80a1\u7968\uff0c\u4e0d\u5fc5\u66f4\u6539\uff0c\u4e5f\u53ef\u81ea\u884c\u8bbe\u7f6e\u3002 None Returns: Type Description int \u8fd4\u56de\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5230\u4e0a\u4e00\u4e2a\u53d1\u51farsi\u4f4e\u6c34\u5e73\u7684\u8ddd\u79bb\u3002 \u5982\u679c\u4ece\u4e0a\u4e00\u4e2a\u6700\u4f4e\u70b9rsi\u5230\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5e76\u672a\u53d1\u51fa\u4f4e\u6c34\u5e73\u4fe1\u53f7\uff0c \u8fd4\u56de\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5230\u4e0a\u4e00\u4e2a\u53d1\u51fa\u6700\u4f4e\u70b9rsi\u7684\u8ddd\u79bb\u3002 \u9664\u6b64\u4e4b\u5916\uff0c\u8fd4\u56deNone\u3002 Source code in omicron/talib/morph.py def rsi_bottom_distance ( close : np . array , thresh : Tuple [ float , float ] = None ) -> int : \"\"\"\u6839\u636e\u7ed9\u5b9a\u7684\u6536\u76d8\u4ef7\uff0c\u8ba1\u7b97\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5230\u4e0a\u4e00\u4e2a\u53d1\u51farsi\u4f4e\u6c34\u5e73\u7684\u8ddd\u79bb\uff0c \u5982\u679c\u4ece\u4e0a\u4e00\u4e2a\u6700\u4f4e\u70b9rsi\u5230\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5e76\u672a\u53d1\u51fa\u4f4e\u6c34\u5e73\u4fe1\u53f7\uff0c \u8fd4\u56de\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5230\u4e0a\u4e00\u4e2a\u53d1\u51fa\u6700\u4f4e\u70b9rsi\u7684\u8ddd\u79bb\u3002 \u5176\u4e2dclose\u7684\u957f\u5ea6\u4e00\u822c\u4e0d\u5c0f\u4e8e60\u3002 \u8fd4\u56de\u503c\u4e3a\u8ddd\u79bb\u6574\u6570\uff0c\u4e0d\u6ee1\u8db3\u6761\u4ef6\u5219\u8fd4\u56deNone\u3002 Args: close (np.array): \u5177\u6709\u65f6\u95f4\u5e8f\u5217\u7684\u6536\u76d8\u4ef7 thresh (Tuple[float, float]) : None\u9002\u7528\u6240\u6709\u80a1\u7968\uff0c\u4e0d\u5fc5\u66f4\u6539\uff0c\u4e5f\u53ef\u81ea\u884c\u8bbe\u7f6e\u3002 Returns: \u8fd4\u56de\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5230\u4e0a\u4e00\u4e2a\u53d1\u51farsi\u4f4e\u6c34\u5e73\u7684\u8ddd\u79bb\u3002 \u5982\u679c\u4ece\u4e0a\u4e00\u4e2a\u6700\u4f4e\u70b9rsi\u5230\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5e76\u672a\u53d1\u51fa\u4f4e\u6c34\u5e73\u4fe1\u53f7\uff0c \u8fd4\u56de\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5230\u4e0a\u4e00\u4e2a\u53d1\u51fa\u6700\u4f4e\u70b9rsi\u7684\u8ddd\u79bb\u3002 \u9664\u6b64\u4e4b\u5916\uff0c\u8fd4\u56deNone\u3002\"\"\" assert len ( close ) >= 60 , \"must provide an array with at least 60 length!\" if close . dtype != np . float64 : close = close . astype ( np . float64 ) if thresh is None : std = np . std ( close [ - 59 :] / close [ - 60 : - 1 ] - 1 ) thresh = ( 2 * std , - 2 * std ) rsi = ta . 
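A small sketch of `plateaus` on made-up data makes the `(start, length)` return value concrete; the input array below is hypothetical.

```python
import numpy as np

from omicron.talib.morph import plateaus  # path taken from the "Source code" note

np.random.seed(7)
numbers = np.concatenate([
    np.linspace(5, 10, 20),            # trending up
    10 + np.random.randn(40) * 0.05,   # hovering around 10 -> candidate plateau
    np.linspace(10, 15, 20),           # trending up again
])

for start, length in plateaus(numbers, min_size=10):
    print(f"plateau starting at {start}, length {length}")
```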
RSI ( close , 6 ) watermarks = rsi_watermarks ( close , thresh ) if watermarks is not None : low_watermark , _ , _ = watermarks pivots = peak_valley_pivots ( close , thresh [ 0 ], thresh [ 1 ]) pivots [ 0 ], pivots [ - 1 ] = 0 , 0 # \u8c37\u503cRSI<30 valley_rsi_index = np . where (( rsi < 30 ) & ( pivots == - 1 ))[ 0 ] # RSI\u4f4e\u6c34\u5e73\u7684\u6700\u5927\u503c\uff1a\u4f4e\u6c34\u5e73*1.01 low_rsi_index = np . where ( rsi <= low_watermark * 1.01 )[ 0 ] if len ( valley_rsi_index ) > 0 : distance = len ( rsi ) - 1 - valley_rsi_index [ - 1 ] if len ( low_rsi_index ) > 0 : if low_rsi_index [ - 1 ] >= valley_rsi_index [ - 1 ]: distance = len ( rsi ) - 1 - low_rsi_index [ - 1 ] return distance","title":"rsi_bottom_distance()"},{"location":"api/talib/#omicron.talib.morph.rsi_bottom_divergent","text":"\u5bfb\u627e\u6700\u8fd1\u6ee1\u8db3\u6761\u4ef6\u7684rsi\u5e95\u80cc\u79bb\u3002 \u8fd4\u56de\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5230\u6700\u8fd1\u5e95\u80cc\u79bb\u53d1\u751f\u70b9\u7684\u8ddd\u79bb\uff1b\u6ca1\u6709\u6ee1\u8db3\u6761\u4ef6\u7684\u5e95\u80cc\u79bb\uff0c\u8fd4\u56deNone\u3002 Parameters: Name Type Description Default close np.array \u65f6\u95f4\u5e8f\u5217\u6536\u76d8\u4ef7 required thresh Tuple[float, float] \u8bf7\u53c2\u8003 peaks_and_valleys None rsi_limit float RSI\u53d1\u751f\u5e95\u80cc\u79bb\u65f6\u7684\u9608\u503c, \u9ed8\u8ba4\u503c30\uff0820\u6548\u679c\u66f4\u4f73\uff0c\u4f46\u662f\u68c0\u6d4b\u51fa\u6765\u6570\u91cf\u592a\u5c11\uff09\uff0c\u5373\u53ea\u8fc7\u6ee4RSI6<30\u7684\u5c40\u90e8\u6700\u4f4e\u6536\u76d8\u4ef7\u3002 30 Returns: Type Description int \u8fd4\u56deint\u7c7b\u578b\u7684\u6574\u6570\uff0c\u8868\u793a\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5230\u6700\u8fd1\u5e95\u80cc\u79bb\u53d1\u751f\u70b9\u7684\u8ddd\u79bb\uff1b\u6ca1\u6709\u6ee1\u8db3\u6761\u4ef6\u7684\u5e95\u80cc\u79bb\uff0c\u8fd4\u56deNone\u3002 Source code in omicron/talib/morph.py def rsi_bottom_divergent ( close : np . array , thresh : Tuple [ float , float ] = None , rsi_limit : float = 30 ) -> int : \"\"\"\u5bfb\u627e\u6700\u8fd1\u6ee1\u8db3\u6761\u4ef6\u7684rsi\u5e95\u80cc\u79bb\u3002 \u8fd4\u56de\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5230\u6700\u8fd1\u5e95\u80cc\u79bb\u53d1\u751f\u70b9\u7684\u8ddd\u79bb\uff1b\u6ca1\u6709\u6ee1\u8db3\u6761\u4ef6\u7684\u5e95\u80cc\u79bb\uff0c\u8fd4\u56deNone\u3002 Args: close (np.array): \u65f6\u95f4\u5e8f\u5217\u6536\u76d8\u4ef7 thresh (Tuple[float, float]): \u8bf7\u53c2\u8003[peaks_and_valleys][omicron.talib.morph.peaks_and_valleys] rsi_limit (float, optional): RSI\u53d1\u751f\u5e95\u80cc\u79bb\u65f6\u7684\u9608\u503c, \u9ed8\u8ba4\u503c30\uff0820\u6548\u679c\u66f4\u4f73\uff0c\u4f46\u662f\u68c0\u6d4b\u51fa\u6765\u6570\u91cf\u592a\u5c11\uff09\uff0c\u5373\u53ea\u8fc7\u6ee4RSI6<30\u7684\u5c40\u90e8\u6700\u4f4e\u6536\u76d8\u4ef7\u3002 Returns: \u8fd4\u56deint\u7c7b\u578b\u7684\u6574\u6570\uff0c\u8868\u793a\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5230\u6700\u8fd1\u5e95\u80cc\u79bb\u53d1\u751f\u70b9\u7684\u8ddd\u79bb\uff1b\u6ca1\u6709\u6ee1\u8db3\u6761\u4ef6\u7684\u5e95\u80cc\u79bb\uff0c\u8fd4\u56deNone\u3002 \"\"\" assert len ( close ) >= 60 , \"must provide an array with at least 60 length!\" if close . dtype != np . float64 : close = close . astype ( np . float64 ) rsi = ta . RSI ( close , 6 ) if thresh is None : std = np . std ( close [ - 59 :] / close [ - 60 : - 1 ] - 1 ) thresh = ( 2 * std , - 2 * std ) pivots = peak_valley_pivots ( close , thresh [ 0 ], thresh [ 1 ]) pivots [ 0 ], pivots [ - 1 ] = 0 , 0 length = len ( close ) valley_index = np . 
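A usage sketch of `rsi_bottom_distance` follows. The deterministic zig-zag series is an assumption made so that RSI6 clearly reaches oversold levels; TA-Lib must be installed, since the source above uses `ta.RSI`.

```python
import numpy as np

from omicron.talib.morph import rsi_bottom_distance  # path taken from the "Source code" note

# illustrative zig-zag: rising and falling legs with tiny counter-moves
up = np.tile([0.02, 0.02, 0.02, -0.002], 5)      # 20-day rising leg
down = np.tile([-0.02, -0.02, -0.02, 0.002], 5)  # 20-day falling leg
close = 10 * np.cumprod(1 + np.concatenate([up, down, up, down]))  # 80 closes (>= 60 required)

distance = rsi_bottom_distance(close)  # thresh=None lets the function derive it from volatility
print(distance)                        # bars since the last RSI low-watermark signal, or None
```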
where (( pivots == - 1 ) & ( rsi <= rsi_limit ))[ 0 ] if len ( valley_index ) >= 2 : if ( close [ valley_index [ - 1 ]] < close [ valley_index [ - 2 ]]) and ( rsi [ valley_index [ - 1 ]] > rsi [ valley_index [ - 2 ]] ): bottom_dev_distance = length - 1 - valley_index [ - 1 ] return bottom_dev_distance","title":"rsi_bottom_divergent()"},{"location":"api/talib/#omicron.talib.morph.rsi_predict_price","text":"\u7ed9\u5b9a\u4e00\u6bb5\u884c\u60c5\uff0c\u6839\u636e\u6700\u8fd1\u7684\u4e24\u4e2aRSI\u7684\u6781\u5c0f\u503c\u548c\u6781\u5927\u503c\u9884\u6d4b\u4e0b\u4e00\u4e2a\u5468\u671f\u53ef\u80fd\u8fbe\u5230\u7684\u6700\u4f4e\u4ef7\u683c\u548c\u6700\u9ad8\u4ef7\u683c\u3002 \u5176\u539f\u7406\u662f\uff0c\u4ee5\u9884\u6d4b\u6700\u8fd1\u7684\u4e24\u4e2a\u6700\u9ad8\u4ef7\u548c\u6700\u4f4e\u4ef7\uff0c\u6c42\u51fa\u5176\u76f8\u5bf9\u5e94\u7684RSI\u503c\uff0c\u6c42\u51fa\u6700\u9ad8\u4ef7\u548c\u6700\u4f4e\u4ef7RSI\u7684\u5747\u503c\uff0c \u82e5\u53ea\u6709\u4e00\u4e2a\u5219\u53d6\u6700\u8fd1\u7684\u4e00\u4e2a\u3002\u518d\u7531RSI\u516c\u5f0f\uff0c\u53cd\u63a8\u4ef7\u683c\u3002\u6b64\u65f6\u8fd4\u56de\u503c\u4e3a(None, float)\uff0c\u5373\u53ea\u6709\u6700\u9ad8\u4ef7\uff0c\u6ca1\u6709\u6700\u4f4e\u4ef7\u3002\u53cd\u4e4b\u4ea6\u7136\u3002 Parameters: Name Type Description Default close np.ndarray \u5177\u6709\u65f6\u95f4\u5e8f\u5217\u7684\u6536\u76d8\u4ef7 required thresh Tuple[float, float]) \u8bf7\u53c2\u8003 peaks_and_valleys None Returns: Type Description Tuple[float, float] \u8fd4\u56de\u6570\u7ec4[predicted_low_price, predicted_high_price], \u6570\u7ec4\u7b2c\u4e00\u4e2a\u503c\u4e3a\u5229\u7528\u8fbe\u5230\u4e4b\u524d\u6700\u4f4e\u6536\u76d8\u4ef7\u7684RSI\u9884\u6d4b\u7684\u6700\u4f4e\u4ef7\u3002 \u7b2c\u4e8c\u4e2a\u503c\u4e3a\u5229\u7528\u8fbe\u5230\u4e4b\u524d\u6700\u9ad8\u6536\u76d8\u4ef7\u7684RSI\u9884\u6d4b\u7684\u6700\u9ad8\u4ef7\u3002 Source code in omicron/talib/morph.py def rsi_predict_price ( close : np . 
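A minimal sketch of `rsi_bottom_divergent`, assuming random-walk data; a `None` result simply means no qualifying bottom divergence in the window.

```python
import numpy as np

from omicron.talib.morph import rsi_bottom_divergent  # path taken from the "Source code" note

np.random.seed(11)
close = np.cumprod(1 + np.random.randn(90) * 0.02)  # at least 60 closes are required

dist = rsi_bottom_divergent(close, rsi_limit=30)
if dist is None:
    print("no bottom divergence in this window")
else:
    print(f"bottom divergence {dist} bars before the last close")
```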
ndarray , thresh : Tuple [ float , float ] = None ) -> Tuple [ float , float ]: \"\"\"\u7ed9\u5b9a\u4e00\u6bb5\u884c\u60c5\uff0c\u6839\u636e\u6700\u8fd1\u7684\u4e24\u4e2aRSI\u7684\u6781\u5c0f\u503c\u548c\u6781\u5927\u503c\u9884\u6d4b\u4e0b\u4e00\u4e2a\u5468\u671f\u53ef\u80fd\u8fbe\u5230\u7684\u6700\u4f4e\u4ef7\u683c\u548c\u6700\u9ad8\u4ef7\u683c\u3002 \u5176\u539f\u7406\u662f\uff0c\u4ee5\u9884\u6d4b\u6700\u8fd1\u7684\u4e24\u4e2a\u6700\u9ad8\u4ef7\u548c\u6700\u4f4e\u4ef7\uff0c\u6c42\u51fa\u5176\u76f8\u5bf9\u5e94\u7684RSI\u503c\uff0c\u6c42\u51fa\u6700\u9ad8\u4ef7\u548c\u6700\u4f4e\u4ef7RSI\u7684\u5747\u503c\uff0c \u82e5\u53ea\u6709\u4e00\u4e2a\u5219\u53d6\u6700\u8fd1\u7684\u4e00\u4e2a\u3002\u518d\u7531RSI\u516c\u5f0f\uff0c\u53cd\u63a8\u4ef7\u683c\u3002\u6b64\u65f6\u8fd4\u56de\u503c\u4e3a(None, float)\uff0c\u5373\u53ea\u6709\u6700\u9ad8\u4ef7\uff0c\u6ca1\u6709\u6700\u4f4e\u4ef7\u3002\u53cd\u4e4b\u4ea6\u7136\u3002 Args: close (np.ndarray): \u5177\u6709\u65f6\u95f4\u5e8f\u5217\u7684\u6536\u76d8\u4ef7 thresh (Tuple[float, float]) : \u8bf7\u53c2\u8003[peaks_and_valleys][omicron.talib.morph.peaks_and_valleys] Returns: \u8fd4\u56de\u6570\u7ec4[predicted_low_price, predicted_high_price], \u6570\u7ec4\u7b2c\u4e00\u4e2a\u503c\u4e3a\u5229\u7528\u8fbe\u5230\u4e4b\u524d\u6700\u4f4e\u6536\u76d8\u4ef7\u7684RSI\u9884\u6d4b\u7684\u6700\u4f4e\u4ef7\u3002 \u7b2c\u4e8c\u4e2a\u503c\u4e3a\u5229\u7528\u8fbe\u5230\u4e4b\u524d\u6700\u9ad8\u6536\u76d8\u4ef7\u7684RSI\u9884\u6d4b\u7684\u6700\u9ad8\u4ef7\u3002 \"\"\" assert len ( close ) >= 60 , \"must provide an array with at least 60 length!\" if thresh is None : std = np . std ( close [ - 59 :] / close [ - 60 : - 1 ] - 1 ) thresh = ( 2 * std , - 2 * std ) if close . dtype != np . float64 : close = close . astype ( np . float64 ) valley_rsi , peak_rsi , _ = rsi_watermarks ( close , thresh = thresh ) pivot = peak_valley_pivots ( close , thresh [ 0 ], thresh [ 1 ]) pivot [ 0 ], pivot [ - 1 ] = 0 , 0 # \u6390\u5934\u53bb\u5c3e price_change = pd . Series ( close ) . diff ( 1 ) . values ave_price_change = ( abs ( price_change )[ - 6 :] . mean ()) * 5 ave_price_raise = ( np . maximum ( price_change , 0 )[ - 6 :] . 
mean ()) * 5 if valley_rsi is not None : predicted_low_change = ( ave_price_change ) - ave_price_raise / ( 0.01 * valley_rsi ) if predicted_low_change > 0 : predicted_low_change = 0 predicted_low_price = close [ - 1 ] + predicted_low_change else : predicted_low_price = None if peak_rsi is not None : predicted_high_change = ( ave_price_raise - ave_price_change ) / ( 0.01 * peak_rsi - 1 ) - ave_price_change if predicted_high_change < 0 : predicted_high_change = 0 predicted_high_price = close [ - 1 ] + predicted_high_change else : predicted_high_price = None return predicted_low_price , predicted_high_price","title":"rsi_predict_price()"},{"location":"api/talib/#omicron.talib.morph.rsi_top_distance","text":"\u6839\u636e\u7ed9\u5b9a\u7684\u6536\u76d8\u4ef7\uff0c\u8ba1\u7b97\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5230\u4e0a\u4e00\u4e2a\u53d1\u51farsi\u9ad8\u6c34\u5e73\u7684\u8ddd\u79bb\uff0c \u5982\u679c\u4ece\u4e0a\u4e00\u4e2a\u6700\u9ad8\u70b9rsi\u5230\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5e76\u672a\u53d1\u51fa\u9ad8\u6c34\u5e73\u4fe1\u53f7\uff0c \u8fd4\u56de\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5230\u4e0a\u4e00\u4e2a\u53d1\u51fa\u6700\u9ad8\u70b9rsi\u7684\u8ddd\u79bb\u3002 \u5176\u4e2dclose\u7684\u957f\u5ea6\u4e00\u822c\u4e0d\u5c0f\u4e8e60\u3002 \u8fd4\u56de\u503c\u4e3a\u8ddd\u79bb\u6574\u6570\uff0c\u4e0d\u6ee1\u8db3\u6761\u4ef6\u5219\u8fd4\u56deNone\u3002 Parameters: Name Type Description Default close np.array \u5177\u6709\u65f6\u95f4\u5e8f\u5217\u7684\u6536\u76d8\u4ef7 required thresh Tuple[float, float]) None\u9002\u7528\u6240\u6709\u80a1\u7968\uff0c\u4e0d\u5fc5\u66f4\u6539\uff0c\u4e5f\u53ef\u81ea\u884c\u8bbe\u7f6e\u3002 None Returns: Type Description int \u8fd4\u56de\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5230\u4e0a\u4e00\u4e2a\u53d1\u51farsi\u9ad8\u6c34\u5e73\u7684\u8ddd\u79bb\u3002 \u5982\u679c\u4ece\u4e0a\u4e00\u4e2a\u6700\u9ad8\u70b9rsi\u5230\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5e76\u672a\u53d1\u51fa\u9ad8\u6c34\u5e73\u4fe1\u53f7\uff0c \u8fd4\u56de\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5230\u4e0a\u4e00\u4e2a\u53d1\u51fa\u6700\u9ad8\u70b9rsi\u7684\u8ddd\u79bb\u3002 \u9664\u6b64\u4e4b\u5916\uff0c\u8fd4\u56deNone\u3002 Source code in omicron/talib/morph.py def rsi_top_distance ( close : np . 
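The sketch below exercises `rsi_predict_price` on the same kind of illustrative zig-zag data, so that both an RSI valley watermark and an RSI peak watermark exist; the series itself is an assumption, not real market data.

```python
import numpy as np

from omicron.talib.morph import rsi_predict_price  # path taken from the "Source code" note

up = np.tile([0.02, 0.02, 0.02, -0.002], 5)
down = np.tile([-0.02, -0.02, -0.02, 0.002], 5)
close = 10 * np.cumprod(1 + np.concatenate([up, down, up, down]))

low, high = rsi_predict_price(close)
print("predicted low :", low)   # None when no RSI valley watermark is available
print("predicted high:", high)  # None when no RSI peak watermark is available
```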
array , thresh : Tuple [ float , float ] = None ) -> int : \"\"\"\u6839\u636e\u7ed9\u5b9a\u7684\u6536\u76d8\u4ef7\uff0c\u8ba1\u7b97\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5230\u4e0a\u4e00\u4e2a\u53d1\u51farsi\u9ad8\u6c34\u5e73\u7684\u8ddd\u79bb\uff0c \u5982\u679c\u4ece\u4e0a\u4e00\u4e2a\u6700\u9ad8\u70b9rsi\u5230\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5e76\u672a\u53d1\u51fa\u9ad8\u6c34\u5e73\u4fe1\u53f7\uff0c \u8fd4\u56de\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5230\u4e0a\u4e00\u4e2a\u53d1\u51fa\u6700\u9ad8\u70b9rsi\u7684\u8ddd\u79bb\u3002 \u5176\u4e2dclose\u7684\u957f\u5ea6\u4e00\u822c\u4e0d\u5c0f\u4e8e60\u3002 \u8fd4\u56de\u503c\u4e3a\u8ddd\u79bb\u6574\u6570\uff0c\u4e0d\u6ee1\u8db3\u6761\u4ef6\u5219\u8fd4\u56deNone\u3002 Args: close (np.array): \u5177\u6709\u65f6\u95f4\u5e8f\u5217\u7684\u6536\u76d8\u4ef7 thresh (Tuple[float, float]) : None\u9002\u7528\u6240\u6709\u80a1\u7968\uff0c\u4e0d\u5fc5\u66f4\u6539\uff0c\u4e5f\u53ef\u81ea\u884c\u8bbe\u7f6e\u3002 Returns: \u8fd4\u56de\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5230\u4e0a\u4e00\u4e2a\u53d1\u51farsi\u9ad8\u6c34\u5e73\u7684\u8ddd\u79bb\u3002 \u5982\u679c\u4ece\u4e0a\u4e00\u4e2a\u6700\u9ad8\u70b9rsi\u5230\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5e76\u672a\u53d1\u51fa\u9ad8\u6c34\u5e73\u4fe1\u53f7\uff0c \u8fd4\u56de\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5230\u4e0a\u4e00\u4e2a\u53d1\u51fa\u6700\u9ad8\u70b9rsi\u7684\u8ddd\u79bb\u3002 \u9664\u6b64\u4e4b\u5916\uff0c\u8fd4\u56deNone\u3002\"\"\" assert len ( close ) >= 60 , \"must provide an array with at least 60 length!\" if close . dtype != np . float64 : close = close . astype ( np . float64 ) if thresh is None : std = np . std ( close [ - 59 :] / close [ - 60 : - 1 ] - 1 ) thresh = ( 2 * std , - 2 * std ) rsi = ta . RSI ( close , 6 ) watermarks = rsi_watermarks ( close , thresh ) if watermarks is not None : _ , high_watermark , _ = watermarks pivots = peak_valley_pivots ( close , thresh [ 0 ], thresh [ 1 ]) pivots [ 0 ], pivots [ - 1 ] = 0 , 0 # \u5cf0\u503cRSI>70 peak_rsi_index = np . where (( rsi > 70 ) & ( pivots == 1 ))[ 0 ] # RSI\u9ad8\u6c34\u5e73\u7684\u6700\u5c0f\u503c\uff1a\u9ad8\u6c34\u5e73*0.99 high_rsi_index = np . 
where ( rsi >= high_watermark * 0.99 )[ 0 ] if len ( peak_rsi_index ) > 0 : distance = len ( rsi ) - 1 - peak_rsi_index [ - 1 ] if len ( high_rsi_index ) > 0 : if high_rsi_index [ - 1 ] >= peak_rsi_index [ - 1 ]: distance = len ( rsi ) - 1 - high_rsi_index [ - 1 ] return distance","title":"rsi_top_distance()"},{"location":"api/talib/#omicron.talib.morph.rsi_top_divergent","text":"\u5bfb\u627e\u6700\u8fd1\u6ee1\u8db3\u6761\u4ef6\u7684rsi\u9876\u80cc\u79bb\u3002 \u8fd4\u56de\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5230\u6700\u8fd1\u9876\u80cc\u79bb\u53d1\u751f\u70b9\u7684\u8ddd\u79bb\uff1b\u6ca1\u6709\u6ee1\u8db3\u6761\u4ef6\u7684\u9876\u80cc\u79bb\uff0c\u8fd4\u56deNone\u3002 Parameters: Name Type Description Default close np.array \u65f6\u95f4\u5e8f\u5217\u6536\u76d8\u4ef7 required thresh Tuple[float, float] \u8bf7\u53c2\u8003 peaks_and_valleys None rsi_limit float RSI\u53d1\u751f\u9876\u80cc\u79bb\u65f6\u7684\u9608\u503c, \u9ed8\u8ba4\u503c70\uff0880\u6548\u679c\u66f4\u4f73\uff0c\u4f46\u662f\u68c0\u6d4b\u51fa\u6765\u6570\u91cf\u592a\u5c11\uff09\uff0c\u5373\u53ea\u8fc7\u6ee4RSI6>70\u7684\u5c40\u90e8\u6700\u9ad8\u6536\u76d8\u4ef7\u3002 70 Returns: Type Description Tuple[int, int] \u8fd4\u56deint\u7c7b\u578b\u7684\u6574\u6570\uff0c\u8868\u793a\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5230\u6700\u8fd1\u9876\u80cc\u79bb\u53d1\u751f\u70b9\u7684\u8ddd\u79bb\uff1b\u6ca1\u6709\u6ee1\u8db3\u6761\u4ef6\u7684\u9876\u80cc\u79bb\uff0c\u8fd4\u56deNone\u3002 Source code in omicron/talib/morph.py def rsi_top_divergent ( close : np . array , thresh : Tuple [ float , float ] = None , rsi_limit : float = 70 ) -> Tuple [ int , int ]: \"\"\"\u5bfb\u627e\u6700\u8fd1\u6ee1\u8db3\u6761\u4ef6\u7684rsi\u9876\u80cc\u79bb\u3002 \u8fd4\u56de\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5230\u6700\u8fd1\u9876\u80cc\u79bb\u53d1\u751f\u70b9\u7684\u8ddd\u79bb\uff1b\u6ca1\u6709\u6ee1\u8db3\u6761\u4ef6\u7684\u9876\u80cc\u79bb\uff0c\u8fd4\u56deNone\u3002 Args: close (np.array): \u65f6\u95f4\u5e8f\u5217\u6536\u76d8\u4ef7 thresh (Tuple[float, float]): \u8bf7\u53c2\u8003[peaks_and_valleys][omicron.talib.morph.peaks_and_valleys] rsi_limit (float, optional): RSI\u53d1\u751f\u9876\u80cc\u79bb\u65f6\u7684\u9608\u503c, \u9ed8\u8ba4\u503c70\uff0880\u6548\u679c\u66f4\u4f73\uff0c\u4f46\u662f\u68c0\u6d4b\u51fa\u6765\u6570\u91cf\u592a\u5c11\uff09\uff0c\u5373\u53ea\u8fc7\u6ee4RSI6>70\u7684\u5c40\u90e8\u6700\u9ad8\u6536\u76d8\u4ef7\u3002 Returns: \u8fd4\u56deint\u7c7b\u578b\u7684\u6574\u6570\uff0c\u8868\u793a\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5230\u6700\u8fd1\u9876\u80cc\u79bb\u53d1\u751f\u70b9\u7684\u8ddd\u79bb\uff1b\u6ca1\u6709\u6ee1\u8db3\u6761\u4ef6\u7684\u9876\u80cc\u79bb\uff0c\u8fd4\u56deNone\u3002 \"\"\" assert len ( close ) >= 60 , \"must provide an array with at least 60 length!\" if close . dtype != np . float64 : close = close . astype ( np . float64 ) rsi = ta . RSI ( close , 6 ) if thresh is None : std = np . std ( close [ - 59 :] / close [ - 60 : - 1 ] - 1 ) thresh = ( 2 * std , - 2 * std ) pivots = peak_valley_pivots ( close , thresh [ 0 ], thresh [ 1 ]) pivots [ 0 ], pivots [ - 1 ] = 0 , 0 length = len ( close ) peak_index = np . 
where (( pivots == 1 ) & ( rsi >= rsi_limit ))[ 0 ] if len ( peak_index ) >= 2 : if ( close [ peak_index [ - 1 ]] > close [ peak_index [ - 2 ]]) and ( rsi [ peak_index [ - 1 ]] < rsi [ peak_index [ - 2 ]] ): top_dev_distance = length - 1 - peak_index [ - 1 ] return top_dev_distance","title":"rsi_top_divergent()"},{"location":"api/talib/#omicron.talib.morph.rsi_watermarks","text":"\u7ed9\u5b9a\u4e00\u6bb5\u884c\u60c5\u6570\u636e\u548c\u7528\u4ee5\u68c0\u6d4b\u9876\u548c\u5e95\u7684\u9608\u503c\uff0c\u8fd4\u56de\u8be5\u6bb5\u884c\u60c5\u4e2d\uff0c\u8c37\u548c\u5cf0\u5904RSI\u5747\u503c\uff0c\u6700\u540e\u4e00\u4e2aRSI6\u503c\u3002 \u5176\u4e2dclose\u7684\u957f\u5ea6\u4e00\u822c\u4e0d\u5c0f\u4e8e60\uff0c\u4e0d\u5927\u4e8e120\u3002\u8fd4\u56de\u503c\u4e2d\uff0c\u4e00\u4e2a\u4e3alow_wartermark\uff08\u8c37\u5e95\u5904RSI\u503c\uff09\uff0c \u4e00\u4e2a\u4e3ahigh_wartermark\uff08\u9ad8\u5cf0\u5904RSI\u503c)\uff0c\u4e00\u4e2a\u4e3aRSI6\u7684\u6700\u540e\u4e00\u4e2a\u503c\uff0c\u7528\u4ee5\u5bf9\u6bd4\u524d\u4e24\u4e2a\u8b66\u6212\u503c\u3002 Parameters: Name Type Description Default close np.array \u5177\u6709\u65f6\u95f4\u5e8f\u5217\u7684\u6536\u76d8\u4ef7 required thresh Tuple[float, float]) None\u9002\u7528\u6240\u6709\u80a1\u7968\uff0c\u4e0d\u5fc5\u66f4\u6539\uff0c\u4e5f\u53ef\u81ea\u884c\u8bbe\u7f6e\u3002 None Returns: Type Description Tuple[float, float, float] \u8fd4\u56de\u6570\u7ec4[low_watermark, high_watermark\uff0c rsi[-1]], \u7b2c\u4e00\u4e2a\u4e3a\u6700\u8fd1\u4e24\u4e2a\u6700\u4f4e\u6536\u76d8\u4ef7\u7684RSI\u5747\u503c\uff0c \u7b2c\u4e8c\u4e2a\u4e3a\u6700\u8fd1\u4e24\u4e2a\u6700\u9ad8\u6536\u76d8\u4ef7\u7684RSI\u5747\u503c\u3002 \u82e5\u4f20\u5165\u6536\u76d8\u4ef7\u53ea\u6709\u4e00\u4e2a\u6700\u503c\uff0c\u53ea\u8fd4\u56de\u4e00\u4e2a\u3002\u6ca1\u6709\u6700\u503c\uff0c\u5219\u8fd4\u56deNone, \u7b2c\u4e09\u4e2a\u4e3a\u5b9e\u9645\u7684\u6700\u540eRSI6\u7684\u503c\u3002 Source code in omicron/talib/morph.py def rsi_watermarks ( close : np . 
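A minimal sketch of `rsi_top_divergent`, again on assumed random-walk data; `None` means no qualifying top divergence was found.

```python
import numpy as np

from omicron.talib.morph import rsi_top_divergent  # path taken from the "Source code" note

np.random.seed(23)
close = np.cumprod(1 + np.random.randn(90) * 0.02)

dist = rsi_top_divergent(close, rsi_limit=70)
if dist is None:
    print("no top divergence in this window")
else:
    print(f"top divergence {dist} bars before the last close")
```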
array , thresh : Tuple [ float , float ] = None ) -> Tuple [ float , float , float ]: \"\"\"\u7ed9\u5b9a\u4e00\u6bb5\u884c\u60c5\u6570\u636e\u548c\u7528\u4ee5\u68c0\u6d4b\u9876\u548c\u5e95\u7684\u9608\u503c\uff0c\u8fd4\u56de\u8be5\u6bb5\u884c\u60c5\u4e2d\uff0c\u8c37\u548c\u5cf0\u5904RSI\u5747\u503c\uff0c\u6700\u540e\u4e00\u4e2aRSI6\u503c\u3002 \u5176\u4e2dclose\u7684\u957f\u5ea6\u4e00\u822c\u4e0d\u5c0f\u4e8e60\uff0c\u4e0d\u5927\u4e8e120\u3002\u8fd4\u56de\u503c\u4e2d\uff0c\u4e00\u4e2a\u4e3alow_wartermark\uff08\u8c37\u5e95\u5904RSI\u503c\uff09\uff0c \u4e00\u4e2a\u4e3ahigh_wartermark\uff08\u9ad8\u5cf0\u5904RSI\u503c)\uff0c\u4e00\u4e2a\u4e3aRSI6\u7684\u6700\u540e\u4e00\u4e2a\u503c\uff0c\u7528\u4ee5\u5bf9\u6bd4\u524d\u4e24\u4e2a\u8b66\u6212\u503c\u3002 Args: close (np.array): \u5177\u6709\u65f6\u95f4\u5e8f\u5217\u7684\u6536\u76d8\u4ef7 thresh (Tuple[float, float]) : None\u9002\u7528\u6240\u6709\u80a1\u7968\uff0c\u4e0d\u5fc5\u66f4\u6539\uff0c\u4e5f\u53ef\u81ea\u884c\u8bbe\u7f6e\u3002 Returns: \u8fd4\u56de\u6570\u7ec4[low_watermark, high_watermark\uff0c rsi[-1]], \u7b2c\u4e00\u4e2a\u4e3a\u6700\u8fd1\u4e24\u4e2a\u6700\u4f4e\u6536\u76d8\u4ef7\u7684RSI\u5747\u503c\uff0c \u7b2c\u4e8c\u4e2a\u4e3a\u6700\u8fd1\u4e24\u4e2a\u6700\u9ad8\u6536\u76d8\u4ef7\u7684RSI\u5747\u503c\u3002 \u82e5\u4f20\u5165\u6536\u76d8\u4ef7\u53ea\u6709\u4e00\u4e2a\u6700\u503c\uff0c\u53ea\u8fd4\u56de\u4e00\u4e2a\u3002\u6ca1\u6709\u6700\u503c\uff0c\u5219\u8fd4\u56deNone, \u7b2c\u4e09\u4e2a\u4e3a\u5b9e\u9645\u7684\u6700\u540eRSI6\u7684\u503c\u3002 \"\"\" assert len ( close ) >= 60 , \"must provide an array with at least 60 length!\" if thresh is None : std = np . std ( close [ - 59 :] / close [ - 60 : - 1 ] - 1 ) thresh = ( 2 * std , - 2 * std ) if close . dtype != np . float64 : close = close . astype ( np . float64 ) rsi = ta . RSI ( close , 6 ) pivots = peak_valley_pivots ( close , thresh [ 0 ], thresh [ 1 ]) pivots [ 0 ], pivots [ - 1 ] = 0 , 0 # \u6390\u5934\u53bb\u5c3e # \u5cf0\u503cRSI>70; \u8c37\u5904\u7684RSI<30; peaks_rsi_index = np . where (( rsi > 70 ) & ( pivots == 1 ))[ 0 ] valleys_rsi_index = np . where (( rsi < 30 ) & ( pivots == - 1 ))[ 0 ] if len ( peaks_rsi_index ) == 0 : high_watermark = None elif len ( peaks_rsi_index ) == 1 : high_watermark = rsi [ peaks_rsi_index [ 0 ]] else : # \u6709\u4e24\u4e2a\u4ee5\u4e0a\u7684\u5cf0\uff0c\u901a\u8fc7\u6700\u8fd1\u7684\u4e24\u4e2a\u5cf0\u5747\u503c\u6765\u786e\u5b9a\u8d70\u52bf high_watermark = np . nanmean ( rsi [ peaks_rsi_index [ - 2 :]]) if len ( valleys_rsi_index ) == 0 : low_watermark = None elif len ( valleys_rsi_index ) == 1 : low_watermark = rsi [ valleys_rsi_index [ 0 ]] else : # \u6709\u4e24\u4e2a\u4ee5\u4e0a\u7684\u5cf0\uff0c\u901a\u8fc7\u6700\u8fd1\u7684\u4e24\u4e2a\u5cf0\u6765\u786e\u5b9a\u8d70\u52bf low_watermark = np . nanmean ( rsi [ valleys_rsi_index [ - 2 :]]) return low_watermark , high_watermark , rsi [ - 1 ]","title":"rsi_watermarks()"},{"location":"api/talib/#omicron.talib.morph.support_resist_lines","text":"\u8ba1\u7b97\u65f6\u95f4\u5e8f\u5217\u7684\u652f\u6491\u7ebf\u548c\u963b\u529b\u7ebf \u4f7f\u7528\u6700\u8fd1\u7684\u4e24\u4e2a\u9ad8\u70b9\u8fde\u63a5\u6210\u9634\u529b\u7ebf\uff0c\u4e24\u4e2a\u4f4e\u70b9\u8fde\u63a5\u6210\u652f\u6491\u7ebf\u3002 Examples: 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 def show_support_resist_lines ( ts ): import plotly.graph_objects as go fig = go . Figure () support , resist , x_start = support_resist_lines ( ts , 0.03 , - 0.03 ) fig . add_trace ( go . Scatter ( x = np . arange ( len ( ts )), y = ts )) x = np . 
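The sketch below shows how `rsi_watermarks`' three return values are typically unpacked; the zig-zag input is illustrative, chosen so that both a peak and a valley watermark exist.

```python
import numpy as np

from omicron.talib.morph import rsi_watermarks  # path taken from the "Source code" note

up = np.tile([0.02, 0.02, 0.02, -0.002], 5)
down = np.tile([-0.02, -0.02, -0.02, 0.002], 5)
close = 10 * np.cumprod(1 + np.concatenate([up, down, up, down]))

low_wm, high_wm, last_rsi = rsi_watermarks(close)
print(low_wm, high_wm, last_rsi)  # low/high watermark may be None when no valley/peak qualifies
```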
arange ( len ( ts ))[ x_start :] fig . add_trace ( go . Line ( x = x , y = support ( x ))) fig . add_trace ( go . Line ( x = x , y = resist ( x ))) fig . show () np . random . seed ( 1978 ) X = np . cumprod ( 1 + np . random . randn ( 100 ) * 0.01 ) show_support_resist_lines ( X ) the above code will show this Parameters: Name Type Description Default ts np.ndarray \u65f6\u95f4\u5e8f\u5217 required upthres float \u8bf7\u53c2\u8003 peaks_and_valleys None downthres float \u8bf7\u53c2\u8003 peaks_and_valleys None Returns: Type Description Tuple[Callable, Callable, numpy.ndarray] \u8fd4\u56de\u652f\u6491\u7ebf\u548c\u963b\u529b\u7ebf\u7684\u8ba1\u7b97\u51fd\u6570\u53ca\u8d77\u59cb\u70b9\u5750\u6807\uff0c\u5982\u679c\u6ca1\u6709\u652f\u6491\u7ebf\u6216\u963b\u529b\u7ebf\uff0c\u5219\u8fd4\u56deNone Source code in omicron/talib/morph.py def support_resist_lines ( ts : np . ndarray , upthres : float = None , downthres : float = None ) -> Tuple [ Callable , Callable , np . ndarray ]: \"\"\"\u8ba1\u7b97\u65f6\u95f4\u5e8f\u5217\u7684\u652f\u6491\u7ebf\u548c\u963b\u529b\u7ebf \u4f7f\u7528\u6700\u8fd1\u7684\u4e24\u4e2a\u9ad8\u70b9\u8fde\u63a5\u6210\u9634\u529b\u7ebf\uff0c\u4e24\u4e2a\u4f4e\u70b9\u8fde\u63a5\u6210\u652f\u6491\u7ebf\u3002 Examples: ```python def show_support_resist_lines(ts): import plotly.graph_objects as go fig = go.Figure() support, resist, x_start = support_resist_lines(ts, 0.03, -0.03) fig.add_trace(go.Scatter(x=np.arange(len(ts)), y=ts)) x = np.arange(len(ts))[x_start:] fig.add_trace(go.Line(x=x, y = support(x))) fig.add_trace(go.Line(x=x, y = resist(x))) fig.show() np.random.seed(1978) X = np.cumprod(1 + np.random.randn(100) * 0.01) show_support_resist_lines(X) ``` the above code will show this ![](https://images.jieyu.ai/images/202204/support_resist.png) Args: ts (np.ndarray): \u65f6\u95f4\u5e8f\u5217 upthres (float, optional): \u8bf7\u53c2\u8003[peaks_and_valleys][omicron.talib.morph.peaks_and_valleys] downthres (float, optional): \u8bf7\u53c2\u8003[peaks_and_valleys][omicron.talib.morph.peaks_and_valleys] Returns: \u8fd4\u56de\u652f\u6491\u7ebf\u548c\u963b\u529b\u7ebf\u7684\u8ba1\u7b97\u51fd\u6570\u53ca\u8d77\u59cb\u70b9\u5750\u6807\uff0c\u5982\u679c\u6ca1\u6709\u652f\u6491\u7ebf\u6216\u963b\u529b\u7ebf\uff0c\u5219\u8fd4\u56deNone \"\"\" if ts . dtype != np . float64 : ts = ts . astype ( np . float64 ) pivots = peaks_and_valleys ( ts , upthres , downthres ) pivots [ 0 ] = 0 pivots [ - 1 ] = 0 arg_max = np . argwhere ( pivots == 1 ) . flatten () arg_min = np . argwhere ( pivots == - 1 ) . flatten () resist = None support = None if len ( arg_max ) >= 2 : arg_max = arg_max [ - 2 :] y = ts [ arg_max ] coeff = np . polyfit ( arg_max , y , deg = 1 ) resist = np . poly1d ( coeff ) if len ( arg_min ) >= 2 : arg_min = arg_min [ - 2 :] y = ts [ arg_min ] coeff = np . polyfit ( arg_min , y , deg = 1 ) support = np . poly1d ( coeff ) return support , resist , np . 
min ([ * arg_min , * arg_max ])","title":"support_resist_lines()"},{"location":"api/talib/#omicron.talib.morph.valley_detect","text":"\u7ed9\u5b9a\u4e00\u6bb5\u884c\u60c5\u6570\u636e\u548c\u7528\u4ee5\u68c0\u6d4b\u8fd1\u671f\u5df2\u53d1\u751f\u53cd\u8f6c\u7684\u6700\u4f4e\u70b9\uff0c\u8fd4\u56de\u8be5\u6bb5\u884c\u60c5\u4e2d\uff0c\u6700\u4f4e\u70b9\u5230\u6700\u540e\u4e00\u4e2a\u6570\u636e\u7684\u8ddd\u79bb\u548c\u6536\u76ca\u7387\u6570\u7ec4\uff0c \u5982\u679c\u7ed9\u5b9a\u884c\u60c5\u4e2d\u672a\u627e\u5230\u6ee1\u8db3\u53c2\u6570\u7684\u6700\u4f4e\u70b9\uff0c\u5219\u8fd4\u56de\u4e24\u4e2a\u7a7a\u503c\u6570\u7ec4\u3002 \u5176\u4e2dbars\u7684\u957f\u5ea6\u4e00\u822c\u4e0d\u5c0f\u4e8e60\uff0c\u4e0d\u5927\u4e8e120\u3002\u6b64\u51fd\u6570\u91c7\u7528\u4e86zigzag\u4e2d\u7684\u8c37\u5cf0\u68c0\u6d4b\u65b9\u6cd5\uff0c\u5176\u4e2d\u53c2\u6570\u9ed8\u8ba4(0.05,-0.02), \u6b64\u53c2\u6570\u5bf9\u6240\u6709\u80a1\u7968\u6570\u636e\u90fd\u9002\u7528\u3002\u82e5\u6ee1\u8db3\u53c2\u6570\uff0c\u8fd4\u56de\u503c\u4e2d\uff0c\u8ddd\u79bb\u4e3a\u5927\u4e8e0\u7684\u6574\u6570\uff0c\u6536\u76ca\u7387\u662f0~1\u7684\u5c0f\u6570\u3002 Parameters: Name Type Description Default close np.ndarray \u5177\u6709\u65f6\u95f4\u5e8f\u5217\u7684\u6536\u76d8\u4ef7 required thresh Tuple[float, float]) \u8bf7\u53c2\u8003 peaks_and_valleys (0.05, -0.02) Returns: Type Description int \u8fd4\u56de\u8be5\u6bb5\u884c\u60c5\u4e2d\uff0c\u6700\u4f4e\u70b9\u5230\u6700\u540e\u4e00\u4e2a\u6570\u636e\u7684\u8ddd\u79bb\u548c\u6536\u76ca\u7387\u6570\u7ec4\uff0c \u5982\u679c\u7ed9\u5b9a\u884c\u60c5\u4e2d\u672a\u627e\u5230\u6ee1\u8db3\u53c2\u6570\u7684\u6700\u4f4e\u70b9\uff0c\u5219\u8fd4\u56de\u4e24\u4e2a\u7a7a\u503c\u6570\u7ec4\u3002 Source code in omicron/talib/morph.py def valley_detect ( close : np . ndarray , thresh : Tuple [ float , float ] = ( 0.05 , - 0.02 ) ) -> int : \"\"\"\u7ed9\u5b9a\u4e00\u6bb5\u884c\u60c5\u6570\u636e\u548c\u7528\u4ee5\u68c0\u6d4b\u8fd1\u671f\u5df2\u53d1\u751f\u53cd\u8f6c\u7684\u6700\u4f4e\u70b9\uff0c\u8fd4\u56de\u8be5\u6bb5\u884c\u60c5\u4e2d\uff0c\u6700\u4f4e\u70b9\u5230\u6700\u540e\u4e00\u4e2a\u6570\u636e\u7684\u8ddd\u79bb\u548c\u6536\u76ca\u7387\u6570\u7ec4\uff0c \u5982\u679c\u7ed9\u5b9a\u884c\u60c5\u4e2d\u672a\u627e\u5230\u6ee1\u8db3\u53c2\u6570\u7684\u6700\u4f4e\u70b9\uff0c\u5219\u8fd4\u56de\u4e24\u4e2a\u7a7a\u503c\u6570\u7ec4\u3002 \u5176\u4e2dbars\u7684\u957f\u5ea6\u4e00\u822c\u4e0d\u5c0f\u4e8e60\uff0c\u4e0d\u5927\u4e8e120\u3002\u6b64\u51fd\u6570\u91c7\u7528\u4e86zigzag\u4e2d\u7684\u8c37\u5cf0\u68c0\u6d4b\u65b9\u6cd5\uff0c\u5176\u4e2d\u53c2\u6570\u9ed8\u8ba4(0.05,-0.02), \u6b64\u53c2\u6570\u5bf9\u6240\u6709\u80a1\u7968\u6570\u636e\u90fd\u9002\u7528\u3002\u82e5\u6ee1\u8db3\u53c2\u6570\uff0c\u8fd4\u56de\u503c\u4e2d\uff0c\u8ddd\u79bb\u4e3a\u5927\u4e8e0\u7684\u6574\u6570\uff0c\u6536\u76ca\u7387\u662f0~1\u7684\u5c0f\u6570\u3002 Args: close (np.ndarray): \u5177\u6709\u65f6\u95f4\u5e8f\u5217\u7684\u6536\u76d8\u4ef7 thresh (Tuple[float, float]) : \u8bf7\u53c2\u8003[peaks_and_valleys][omicron.talib.morph.peaks_and_valleys] Returns: \u8fd4\u56de\u8be5\u6bb5\u884c\u60c5\u4e2d\uff0c\u6700\u4f4e\u70b9\u5230\u6700\u540e\u4e00\u4e2a\u6570\u636e\u7684\u8ddd\u79bb\u548c\u6536\u76ca\u7387\u6570\u7ec4\uff0c \u5982\u679c\u7ed9\u5b9a\u884c\u60c5\u4e2d\u672a\u627e\u5230\u6ee1\u8db3\u53c2\u6570\u7684\u6700\u4f4e\u70b9\uff0c\u5219\u8fd4\u56de\u4e24\u4e2a\u7a7a\u503c\u6570\u7ec4\u3002 \"\"\" assert len ( close ) >= 60 , \"must provide an array with at least 60 length!\" if close . dtype != np . float64 : close = close . 
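Besides the plotting example shown above, `support_resist_lines` can be used purely numerically; the sketch below reuses the same seeded data and thresholds from that example.

```python
import numpy as np

from omicron.talib.morph import support_resist_lines  # path taken from the "Source code" note

np.random.seed(1978)                              # same data as the plotting example above
ts = np.cumprod(1 + np.random.randn(100) * 0.01)

support, resist, x_start = support_resist_lines(ts, 0.03, -0.03)
x = np.arange(len(ts))[x_start:]
if support is not None:
    print("support at last bar   :", support(x[-1]))
if resist is not None:
    print("resistance at last bar:", resist(x[-1]))
```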
astype ( np . float64 ) if thresh is None : std = np . std ( close [ - 59 :] / close [ - 60 : - 1 ] - 1 ) thresh = ( 2 * std , - 2 * std ) pivots = peak_valley_pivots ( close , thresh [ 0 ], thresh [ 1 ]) flags = pivots [ pivots != 0 ] increased = None lowest_distance = None if ( flags [ - 2 ] == - 1 ) and ( flags [ - 1 ] == 1 ): length = len ( pivots ) valley_index = np . where ( pivots == - 1 )[ 0 ] increased = ( close [ - 1 ] - close [ valley_index [ - 1 ]]) / close [ valley_index [ - 1 ]] lowest_distance = int ( length - 1 - valley_index [ - 1 ]) return lowest_distance , increased","title":"valley_detect()"},{"location":"api/talib/#omicron.talib.morph.vcross","text":"\u5224\u65ad\u5e8f\u5217f\u662f\u5426\u4e0eg\u5b58\u5728\u7c7b\u578bv\u578b\u7684\u76f8\u4ea4\u3002\u5373\u5b58\u5728\u4e24\u4e2a\u4ea4\u70b9\uff0c\u7b2c\u4e00\u4e2a\u4ea4\u70b9\u4e3a\u5411\u4e0b\u76f8\u4ea4\uff0c\u7b2c\u4e8c\u4e2a\u4ea4\u70b9\u4e3a\u5411\u4e0a \u76f8\u4ea4\u3002\u4e00\u822c\u53cd\u6620\u4e3a\u6d17\u76d8\u62c9\u5347\u7684\u7279\u5f81\u3002 Examples: >>> f = np . array ([ 3 * i ** 2 - 20 * i + 2 for i in range ( 10 )]) >>> g = np . array ([ i - 5 for i in range ( 10 )]) >>> flag , indices = vcross ( f , g ) >>> assert flag is True >>> assert indices [ 0 ] == 0 >>> assert indices [ 1 ] == 6 Parameters: Name Type Description Default f first sequence required g the second sequence required Returns: Type Description Tuple (flag, indices), \u5176\u4e2dflag\u53d6\u503c\u4e3aTrue\u65f6\uff0c\u5b58\u5728vcross\uff0cindices\u4e3a\u4ea4\u70b9\u7684\u7d22\u5f15\u3002 Source code in omicron/talib/morph.py def vcross ( f : np . array , g : np . array ) -> Tuple : \"\"\"\u5224\u65ad\u5e8f\u5217f\u662f\u5426\u4e0eg\u5b58\u5728\u7c7b\u578bv\u578b\u7684\u76f8\u4ea4\u3002\u5373\u5b58\u5728\u4e24\u4e2a\u4ea4\u70b9\uff0c\u7b2c\u4e00\u4e2a\u4ea4\u70b9\u4e3a\u5411\u4e0b\u76f8\u4ea4\uff0c\u7b2c\u4e8c\u4e2a\u4ea4\u70b9\u4e3a\u5411\u4e0a \u76f8\u4ea4\u3002\u4e00\u822c\u53cd\u6620\u4e3a\u6d17\u76d8\u62c9\u5347\u7684\u7279\u5f81\u3002 Examples: >>> f = np.array([ 3 * i ** 2 - 20 * i + 2 for i in range(10)]) >>> g = np.array([ i - 5 for i in range(10)]) >>> flag, indices = vcross(f, g) >>> assert flag is True >>> assert indices[0] == 0 >>> assert indices[1] == 6 Args: f: first sequence g: the second sequence Returns: (flag, indices), \u5176\u4e2dflag\u53d6\u503c\u4e3aTrue\u65f6\uff0c\u5b58\u5728vcross\uff0cindices\u4e3a\u4ea4\u70b9\u7684\u7d22\u5f15\u3002 \"\"\" indices = np . argwhere ( np . diff ( np . sign ( f - g ))) . flatten () if len ( indices ) == 2 : idx0 , idx1 = indices if f [ idx0 ] > g [ idx0 ] and f [ idx1 ] < g [ idx1 ]: return True , ( idx0 , idx1 ) return False , ( None , None )","title":"vcross()"},{"location":"api/timeframe/","text":"TimeFrame \u00b6 Source code in omicron/models/timeframe.py class TimeFrame : minute_level_frames = [ FrameType . MIN1 , FrameType . MIN5 , FrameType . MIN15 , FrameType . MIN30 , FrameType . MIN60 , ] day_level_frames = [ FrameType . DAY , FrameType . WEEK , FrameType . MONTH , FrameType . QUARTER , FrameType . YEAR , ] ticks = { FrameType . MIN1 : [ i for i in itertools . chain ( range ( 571 , 691 ), range ( 781 , 901 ))], FrameType . MIN5 : [ i for i in itertools . chain ( range ( 575 , 695 , 5 ), range ( 785 , 905 , 5 )) ], FrameType . MIN15 : [ i for i in itertools . chain ( range ( 585 , 705 , 15 ), range ( 795 , 915 , 15 )) ], FrameType . 
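A usage sketch for `valley_detect` with the documented default thresholds; the random-walk input is an assumption, and a `(None, None)` result simply means no qualifying reversal low was found.

```python
import numpy as np

from omicron.talib.morph import valley_detect  # path taken from the "Source code" note

np.random.seed(31)
close = np.cumprod(1 + np.random.randn(80) * 0.02)  # at least 60 closes are required

distance, increased = valley_detect(close, thresh=(0.05, -0.02))
if distance is None:
    print("no recent reversal low under the default thresholds")
else:
    print(f"reversal low {distance} bars ago, up {increased:.1%} since then")
```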
MIN30 : [ int ( s [: 2 ]) * 60 + int ( s [ 2 :]) for s in [ \"1000\" , \"1030\" , \"1100\" , \"1130\" , \"1330\" , \"1400\" , \"1430\" , \"1500\" ] ], FrameType . MIN60 : [ int ( s [: 2 ]) * 60 + int ( s [ 2 :]) for s in [ \"1030\" , \"1130\" , \"1400\" , \"1500\" ] ], } day_frames = None week_frames = None month_frames = None quarter_frames = None year_frames = None @classmethod def service_degrade ( cls ): \"\"\"\u5f53cache\u4e2d\u4e0d\u5b58\u5728\u65e5\u5386\u65f6\uff0c\u542f\u7528\u968fomicron\u7248\u672c\u4e00\u8d77\u53d1\u884c\u65f6\u81ea\u5e26\u7684\u65e5\u5386\u3002 \u6ce8\u610f\uff1a\u968fomicron\u7248\u672c\u4e00\u8d77\u53d1\u884c\u65f6\u81ea\u5e26\u7684\u65e5\u5386\u5f88\u53ef\u80fd\u4e0d\u662f\u6700\u65b0\u7684\uff0c\u5e76\u4e14\u53ef\u80fd\u5305\u542b\u9519\u8bef\u3002\u6bd4\u5982\uff0c\u5b58\u5728\u8fd9\u6837\u7684\u60c5\u51b5\uff0c\u5728\u672c\u7248\u672c\u7684omicron\u53d1\u884c\u65f6\uff0c\u65e5\u5386\u66f4\u65b0\u5230\u4e862021\u5e7412\u670831\u65e5\uff0c\u5728\u8fd9\u4e4b\u524d\u7684\u65e5\u5386\u90fd\u662f\u51c6\u786e\u7684\uff0c\u4f46\u5728\u6b64\u4e4b\u540e\u7684\u65e5\u5386\uff0c\u5219\u6709\u53ef\u80fd\u51fa\u73b0\u9519\u8bef\u3002\u56e0\u6b64\uff0c\u53ea\u5e94\u8be5\u5728\u7279\u6b8a\u7684\u60c5\u51b5\u4e0b\uff08\u6bd4\u5982\u6d4b\u8bd5\uff09\u8c03\u7528\u6b64\u65b9\u6cd5\uff0c\u4ee5\u83b7\u5f97\u4e00\u4e2a\u964d\u7ea7\u7684\u670d\u52a1\u3002 \"\"\" _dir = os . path . dirname ( __file__ ) file = os . path . join ( _dir , \"..\" , \"config\" , \"calendar.json\" ) with open ( file , \"r\" ) as f : data = json . load ( f ) for k , v in data . items (): setattr ( cls , k , np . array ( v )) @classmethod async def _load_calendar ( cls ): \"\"\"\u4ece\u6570\u636e\u7f13\u5b58\u4e2d\u52a0\u8f7d\u66f4\u65b0\u65e5\u5386\"\"\" from omicron import cache names = [ \"day_frames\" , \"week_frames\" , \"month_frames\" , \"quarter_frames\" , \"year_frames\" , ] for name , frame_type in zip ( names , cls . day_level_frames ): key = f \"calendar: { frame_type . value } \" result = await cache . security . lrange ( key , 0 , - 1 ) if result is not None and len ( result ): frames = [ int ( x ) for x in result ] setattr ( cls , name , np . array ( frames )) else : # pragma: no cover raise DataNotReadyError ( f \"calendar data is not ready: { name } missed\" ) @classmethod async def init ( cls ): \"\"\"\u521d\u59cb\u5316\u65e5\u5386\"\"\" await cls . _load_calendar () @classmethod def int2time ( cls , tm : int ) -> datetime . datetime : \"\"\"\u5c06\u6574\u6570\u8868\u793a\u7684\u65f6\u95f4\u8f6c\u6362\u4e3a`datetime`\u7c7b\u578b\u8868\u793a examples: >>> TimeFrame.int2time(202005011500) datetime.datetime(2020, 5, 1, 15, 0) Args: tm: time in YYYYMMDDHHmm format Returns: \u8f6c\u6362\u540e\u7684\u65f6\u95f4 \"\"\" s = str ( tm ) # its 8 times faster than arrow.get() return datetime . datetime ( int ( s [: 4 ]), int ( s [ 4 : 6 ]), int ( s [ 6 : 8 ]), int ( s [ 8 : 10 ]), int ( s [ 10 : 12 ]) ) @classmethod def time2int ( cls , tm : Union [ datetime . datetime , Arrow ]) -> int : \"\"\"\u5c06\u65f6\u95f4\u7c7b\u578b\u8f6c\u6362\u4e3a\u6574\u6570\u7c7b\u578b tm\u53ef\u4ee5\u662fArrow\u7c7b\u578b\uff0c\u4e5f\u53ef\u4ee5\u662fdatetime.datetime\u6216\u8005\u4efb\u4f55\u5176\u5b83\u7c7b\u578b\uff0c\u53ea\u8981\u5b83\u6709year,month...\u7b49 \u5c5e\u6027 Examples: >>> TimeFrame.time2int(datetime.datetime(2020, 5, 1, 15)) 202005011500 Args: tm: Returns: \u8f6c\u6362\u540e\u7684\u6574\u6570\uff0c\u6bd4\u59822020050115 \"\"\" return int ( f \" { tm . year : 04 }{ tm . month : 02 }{ tm . 
day : 02 }{ tm . hour : 02 }{ tm . minute : 02 } \" ) @classmethod def date2int ( cls , d : Union [ datetime . datetime , datetime . date , Arrow ]) -> int : \"\"\"\u5c06\u65e5\u671f\u8f6c\u6362\u4e3a\u6574\u6570\u8868\u793a \u5728zillionare\u4e2d\uff0c\u5982\u679c\u8981\u5bf9\u65f6\u95f4\u548c\u65e5\u671f\u8fdb\u884c\u6301\u4e45\u5316\u64cd\u4f5c\uff0c\u6211\u4eec\u4e00\u822c\u5c06\u5176\u8f6c\u6362\u4e3aint\u7c7b\u578b Examples: >>> TimeFrame.date2int(datetime.date(2020,5,1)) 20200501 Args: d: date Returns: \u65e5\u671f\u7684\u6574\u6570\u8868\u793a\uff0c\u6bd4\u598220220211 \"\"\" return int ( f \" { d . year : 04 }{ d . month : 02 }{ d . day : 02 } \" ) @classmethod def int2date ( cls , d : Union [ int , str ]) -> datetime . date : \"\"\"\u5c06\u6570\u5b57\u8868\u793a\u7684\u65e5\u671f\u8f6c\u6362\u6210\u4e3a\u65e5\u671f\u683c\u5f0f Examples: >>> TimeFrame.int2date(20200501) datetime.date(2020, 5, 1) Args: d: YYYYMMDD\u8868\u793a\u7684\u65e5\u671f Returns: \u8f6c\u6362\u540e\u7684\u65e5\u671f \"\"\" s = str ( d ) # it's 8 times faster than arrow.get return datetime . date ( int ( s [: 4 ]), int ( s [ 4 : 6 ]), int ( s [ 6 :])) @classmethod def day_shift ( cls , start : datetime . date , offset : int ) -> datetime . date : \"\"\"\u5bf9\u6307\u5b9a\u65e5\u671f\u8fdb\u884c\u524d\u540e\u79fb\u4f4d\u64cd\u4f5c \u5982\u679c n == 0\uff0c\u5219\u8fd4\u56ded\u5bf9\u5e94\u7684\u4ea4\u6613\u65e5\uff08\u5982\u679c\u662f\u975e\u4ea4\u6613\u65e5\uff0c\u5219\u8fd4\u56de\u521a\u7ed3\u675f\u7684\u4e00\u4e2a\u4ea4\u6613\u65e5\uff09 \u5982\u679c n > 0\uff0c\u5219\u8fd4\u56ded\u5bf9\u5e94\u7684\u4ea4\u6613\u65e5\u540e\u7b2c n \u4e2a\u4ea4\u6613\u65e5 \u5982\u679c n < 0\uff0c\u5219\u8fd4\u56ded\u5bf9\u5e94\u7684\u4ea4\u6613\u65e5\u524d\u7b2c n \u4e2a\u4ea4\u6613\u65e5 Examples: >>> TimeFrame.day_frames = [20191212, 20191213, 20191216, 20191217,20191218, 20191219] >>> TimeFrame.day_shift(datetime.date(2019,12,13), 0) datetime.date(2019, 12, 13) >>> TimeFrame.day_shift(datetime.date(2019, 12, 15), 0) datetime.date(2019, 12, 13) >>> TimeFrame.day_shift(datetime.date(2019, 12, 15), 1) datetime.date(2019, 12, 16) >>> TimeFrame.day_shift(datetime.date(2019, 12, 13), 1) datetime.date(2019, 12, 16) Args: start: the origin day offset: days to shift, can be negative Returns: \u79fb\u4f4d\u540e\u7684\u65e5\u671f \"\"\" # accelerated from 0.12 to 0.07, per 10000 loop, type conversion time included start = cls . date2int ( start ) return cls . int2date ( ext . shift ( cls . day_frames , start , offset )) @classmethod def week_shift ( cls , start : datetime . date , offset : int ) -> datetime . date : \"\"\"\u5bf9\u6307\u5b9a\u65e5\u671f\u6309\u5468\u7ebf\u5e27\u8fdb\u884c\u524d\u540e\u79fb\u4f4d\u64cd\u4f5c \u53c2\u8003 [omicron.models.timeframe.TimeFrame.day_shift][] Examples: >>> TimeFrame.week_frames = np.array([20200103, 20200110, 20200117, 20200123,20200207, 20200214]) >>> moment = arrow.get('2020-1-21').date() >>> TimeFrame.week_shift(moment, 1) datetime.date(2020, 1, 23) >>> TimeFrame.week_shift(moment, 0) datetime.date(2020, 1, 17) >>> TimeFrame.week_shift(moment, -1) datetime.date(2020, 1, 10) Returns: \u79fb\u4f4d\u540e\u7684\u65e5\u671f \"\"\" start = cls . date2int ( start ) return cls . int2date ( ext . shift ( cls . week_frames , start , offset )) @classmethod def month_shift ( cls , start : datetime . date , offset : int ) -> datetime . 
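The conversion helpers above round-trip cleanly between `datetime` objects and their integer forms; the sketch below simply strings the doctest cases together.

```python
import datetime

from omicron.models.timeframe import TimeFrame  # path taken from the "Source code" note

print(TimeFrame.time2int(datetime.datetime(2020, 5, 1, 15)))  # 202005011500
print(TimeFrame.int2time(202005011500))                       # datetime.datetime(2020, 5, 1, 15, 0)
print(TimeFrame.date2int(datetime.date(2020, 5, 1)))          # 20200501
print(TimeFrame.int2date(20200501))                           # datetime.date(2020, 5, 1)
```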
date : \"\"\"\u6c42`start`\u6240\u5728\u7684\u6708\u79fb\u4f4d\u540e\u7684frame \u672c\u51fd\u6570\u9996\u5148\u5c06`start`\u5bf9\u9f50\uff0c\u7136\u540e\u8fdb\u884c\u79fb\u4f4d\u3002 Examples: >>> TimeFrame.month_frames = np.array([20150130, 20150227, 20150331, 20150430]) >>> TimeFrame.month_shift(arrow.get('2015-2-26').date(), 0) datetime.date(2015, 1, 30) >>> TimeFrame.month_shift(arrow.get('2015-2-27').date(), 0) datetime.date(2015, 2, 27) >>> TimeFrame.month_shift(arrow.get('2015-3-1').date(), 0) datetime.date(2015, 2, 27) >>> TimeFrame.month_shift(arrow.get('2015-3-1').date(), 1) datetime.date(2015, 3, 31) Returns: \u79fb\u4f4d\u540e\u7684\u65e5\u671f \"\"\" start = cls . date2int ( start ) return cls . int2date ( ext . shift ( cls . month_frames , start , offset )) @classmethod def get_ticks ( cls , frame_type : FrameType ) -> Union [ List , np . array ]: \"\"\"\u53d6\u6708\u7ebf\u3001\u5468\u7ebf\u3001\u65e5\u7ebf\u53ca\u5404\u5206\u949f\u7ebf\u5bf9\u5e94\u7684frame \u5bf9\u5206\u949f\u7ebf\uff0c\u8fd4\u56de\u503c\u4ec5\u5305\u542b\u65f6\u95f4\uff0c\u4e0d\u5305\u542b\u65e5\u671f\uff08\u5747\u4e3a\u6574\u6570\u8868\u793a\uff09 Examples: >>> TimeFrame.month_frames = np.array([20050131, 20050228, 20050331]) >>> TimeFrame.get_ticks(FrameType.MONTH)[:3] array([20050131, 20050228, 20050331]) Args: frame_type : [description] Raises: ValueError: [description] Returns: \u6708\u7ebf\u3001\u5468\u7ebf\u3001\u65e5\u7ebf\u53ca\u5404\u5206\u949f\u7ebf\u5bf9\u5e94\u7684frame \"\"\" if frame_type in cls . minute_level_frames : return cls . ticks [ frame_type ] if frame_type == FrameType . DAY : return cls . day_frames elif frame_type == FrameType . WEEK : return cls . week_frames elif frame_type == FrameType . MONTH : return cls . month_frames else : # pragma: no cover raise ValueError ( f \" { frame_type } not supported!\" ) @classmethod def shift ( cls , moment : Union [ Arrow , datetime . date , datetime . datetime ], n : int , frame_type : FrameType , ) -> Union [ datetime . date , datetime . datetime ]: \"\"\"\u5c06\u6307\u5b9a\u7684moment\u79fb\u52a8N\u4e2a`frame_type`\u4f4d\u7f6e\u3002 \u5f53N\u4e3a\u8d1f\u6570\u65f6\uff0c\u610f\u5473\u7740\u5411\u524d\u79fb\u52a8\uff1b\u5f53N\u4e3a\u6b63\u6570\u65f6\uff0c\u610f\u5473\u7740\u5411\u540e\u79fb\u52a8\u3002\u5982\u679cn\u4e3a\u96f6\uff0c\u610f\u5473\u7740\u79fb\u52a8\u5230\u6700\u63a5\u8fd1 \u7684\u4e00\u4e2a\u5df2\u7ed3\u675f\u7684frame\u3002 \u5982\u679cmoment\u6ca1\u6709\u5bf9\u9f50\u5230frame_type\u5bf9\u5e94\u7684\u65f6\u95f4\uff0c\u5c06\u9996\u5148\u8fdb\u884c\u5bf9\u9f50\u3002 See also: - [day_shift][omicron.models.timeframe.TimeFrame.day_shift] - [week_shift][omicron.models.timeframe.TimeFrame.week_shift] - [month_shift][omicron.models.timeframe.TimeFrame.month_shift] Examples: >>> TimeFrame.shift(datetime.date(2020, 1, 3), 1, FrameType.DAY) datetime.date(2020, 1, 6) >>> TimeFrame.shift(datetime.datetime(2020, 1, 6, 11), 1, FrameType.MIN30) datetime.datetime(2020, 1, 6, 11, 30) Args: moment: n: frame_type: Returns: \u79fb\u4f4d\u540e\u7684Frame \"\"\" if frame_type == FrameType . DAY : return cls . day_shift ( moment , n ) elif frame_type == FrameType . WEEK : return cls . week_shift ( moment , n ) elif frame_type == FrameType . MONTH : return cls . month_shift ( moment , n ) elif frame_type in [ FrameType . MIN1 , FrameType . MIN5 , FrameType . MIN15 , FrameType . MIN30 , FrameType . MIN60 , ]: tm = moment . hour * 60 + moment . minute new_tick_pos = cls . ticks [ frame_type ] . index ( tm ) + n days = new_tick_pos // len ( cls . 
ticks [ frame_type ]) min_part = new_tick_pos % len ( cls . ticks [ frame_type ]) date_part = cls . day_shift ( moment . date (), days ) minutes = cls . ticks [ frame_type ][ min_part ] h , m = minutes // 60 , minutes % 60 return datetime . datetime ( date_part . year , date_part . month , date_part . day , h , m , tzinfo = moment . tzinfo , ) else : # pragma: no cover raise ValueError ( f \" { frame_type } is not supported.\" ) @classmethod def count_day_frames ( cls , start : Union [ datetime . date , Arrow ], end : Union [ datetime . date , Arrow ] ) -> int : \"\"\"calc trade days between start and end in close-to-close way. if start == end, this will returns 1. Both start/end will be aligned to open trade day before calculation. Examples: >>> start = datetime.date(2019, 12, 21) >>> end = datetime.date(2019, 12, 21) >>> TimeFrame.day_frames = [20191219, 20191220, 20191223, 20191224, 20191225] >>> TimeFrame.count_day_frames(start, end) 1 >>> # non-trade days are removed >>> TimeFrame.day_frames = [20200121, 20200122, 20200123, 20200203, 20200204, 20200205] >>> start = datetime.date(2020, 1, 23) >>> end = datetime.date(2020, 2, 4) >>> TimeFrame.count_day_frames(start, end) 3 args: start: end: returns: count of days \"\"\" start = cls . date2int ( start ) end = cls . date2int ( end ) return int ( ext . count_between ( cls . day_frames , start , end )) @classmethod def count_week_frames ( cls , start : datetime . date , end : datetime . date ) -> int : \"\"\" calc trade weeks between start and end in close-to-close way. Both start and end will be aligned to open trade day before calculation. After that, if start == end, this will returns 1 for examples, please refer to [count_day_frames][omicron.models.timeframe.TimeFrame.count_day_frames] args: start: end: returns: count of weeks \"\"\" start = cls . date2int ( start ) end = cls . date2int ( end ) return int ( ext . count_between ( cls . week_frames , start , end )) @classmethod def count_month_frames ( cls , start : datetime . date , end : datetime . date ) -> int : \"\"\"calc trade months between start and end date in close-to-close way Both start and end will be aligned to open trade day before calculation. After that, if start == end, this will returns 1. For examples, please refer to [count_day_frames][omicron.models.timeframe.TimeFrame.count_day_frames] Args: start: end: Returns: months between start and end \"\"\" start = cls . date2int ( start ) end = cls . date2int ( end ) return int ( ext . count_between ( cls . month_frames , start , end )) @classmethod def count_quarter_frames ( cls , start : datetime . date , end : datetime . date ) -> int : \"\"\"calc trade quarters between start and end date in close-to-close way Both start and end will be aligned to open trade day before calculation. After that, if start == end, this will returns 1. For examples, please refer to [count_day_frames][omicron.models.timeframe.TimeFrame.count_day_frames] Args: start (datetime.date): [description] end (datetime.date): [description] Returns: quarters between start and end \"\"\" start = cls . date2int ( start ) end = cls . date2int ( end ) return int ( ext . count_between ( cls . quarter_frames , start , end )) @classmethod def count_year_frames ( cls , start : datetime . date , end : datetime . date ) -> int : \"\"\"calc trade years between start and end date in close-to-close way Both start and end will be aligned to open trade day before calculation. After that, if start == end, this will returns 1. 
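The doctests above inject a small trading calendar directly into `TimeFrame`; the sketch below does the same (in production the calendar comes from `TimeFrame.init()`), reusing the dates from the `count_day_frames` example.

```python
import datetime

from omicron.models.timeframe import TimeFrame  # path taken from the "Source code" note

# inject a tiny calendar, exactly as the doctests above do
TimeFrame.day_frames = [20200121, 20200122, 20200123, 20200203, 20200204, 20200205]

print(TimeFrame.day_shift(datetime.date(2020, 1, 23), 1))      # datetime.date(2020, 2, 3)
print(TimeFrame.count_day_frames(datetime.date(2020, 1, 23),
                                 datetime.date(2020, 2, 4)))   # 3, per the example above
```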
For examples, please refer to [count_day_frames][omicron.models.timeframe.TimeFrame.count_day_frames] Args: start (datetime.date): [description] end (datetime.date): [description] Returns: years between start and end \"\"\" start = cls . date2int ( start ) end = cls . date2int ( end ) return int ( ext . count_between ( cls . year_frames , start , end )) @classmethod def count_frames ( cls , start : Union [ datetime . date , datetime . datetime , Arrow ], end : Union [ datetime . date , datetime . datetime , Arrow ], frame_type , ) -> int : \"\"\"\u8ba1\u7b97start\u4e0eend\u4e4b\u95f4\u6709\u591a\u5c11\u4e2a\u5468\u671f\u4e3aframe_type\u7684frames See also: - [count_day_frames][omicron.models.timeframe.TimeFrame.count_day_frames] - [count_week_frames][omicron.models.timeframe.TimeFrame.count_week_frames] - [count_month_frames][omicron.models.timeframe.TimeFrame.count_month_frames] Args: start : start frame end : end frame frame_type : the type of frame Raises: ValueError: \u5982\u679cframe_type\u4e0d\u652f\u6301\uff0c\u5219\u4f1a\u629b\u51fa\u6b64\u5f02\u5e38\u3002 Returns: \u4ecestart\u5230end\u7684\u5e27\u6570 \"\"\" if frame_type == FrameType . DAY : return cls . count_day_frames ( start , end ) elif frame_type == FrameType . WEEK : return cls . count_week_frames ( start , end ) elif frame_type == FrameType . MONTH : return cls . count_month_frames ( start , end ) elif frame_type == FrameType . QUARTER : return cls . count_quarter_frames ( start , end ) elif frame_type == FrameType . YEAR : return cls . count_year_frames ( start , end ) elif frame_type in [ FrameType . MIN1 , FrameType . MIN5 , FrameType . MIN15 , FrameType . MIN30 , FrameType . MIN60 , ]: tm_start = start . hour * 60 + start . minute tm_end = end . hour * 60 + end . minute days = cls . count_day_frames ( start . date (), end . date ()) - 1 tm_start_pos = cls . ticks [ frame_type ] . index ( tm_start ) tm_end_pos = cls . ticks [ frame_type ] . index ( tm_end ) min_bars = tm_end_pos - tm_start_pos + 1 return days * len ( cls . ticks [ frame_type ]) + min_bars else : # pragma: no cover raise ValueError ( f \" { frame_type } is not supported yet\" ) @classmethod def is_trade_day ( cls , dt : Union [ datetime . date , datetime . datetime , Arrow ]) -> bool : \"\"\"\u5224\u65ad`dt`\u662f\u5426\u4e3a\u4ea4\u6613\u65e5 Examples: >>> TimeFrame.is_trade_day(arrow.get('2020-1-1')) False Args: dt : Returns: bool \"\"\" return cls . date2int ( dt ) in cls . day_frames @classmethod def is_open_time ( cls , tm : Union [ datetime . datetime , Arrow ] = None ) -> bool : \"\"\"\u5224\u65ad`tm`\u6307\u5b9a\u7684\u65f6\u95f4\u662f\u5426\u5904\u5728\u4ea4\u6613\u65f6\u95f4\u6bb5\u3002 \u4ea4\u6613\u65f6\u95f4\u6bb5\u662f\u6307\u96c6\u5408\u7ade\u4ef7\u65f6\u95f4\u6bb5\u4e4b\u5916\u7684\u5f00\u76d8\u65f6\u95f4 Examples: >>> TimeFrame.day_frames = np.array([20200102, 20200103, 20200106, 20200107, 20200108]) >>> TimeFrame.is_open_time(arrow.get('2020-1-1 14:59').naive) False >>> TimeFrame.is_open_time(arrow.get('2020-1-3 14:59').naive) True Args: tm : [description]. Defaults to None. Returns: bool \"\"\" tm = tm or arrow . now () if not cls . is_trade_day ( tm ): return False tick = tm . hour * 60 + tm . minute return tick in cls . ticks [ FrameType . MIN1 ] @classmethod def is_opening_call_auction_time ( cls , tm : Union [ Arrow , datetime . datetime ] = None ) -> bool : \"\"\"\u5224\u65ad`tm`\u6307\u5b9a\u7684\u65f6\u95f4\u662f\u5426\u4e3a\u5f00\u76d8\u96c6\u5408\u7ade\u4ef7\u65f6\u95f4 Args: tm : [description]. Defaults to None. 
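A short sketch for the trade-day and open-time predicates, again mirroring the doctests' approach of injecting `day_frames` by hand.

```python
import arrow
import numpy as np

from omicron.models.timeframe import TimeFrame  # path taken from the "Source code" note

TimeFrame.day_frames = np.array([20200102, 20200103, 20200106, 20200107, 20200108])

print(TimeFrame.is_trade_day(arrow.get("2020-1-1")))              # False, not in the calendar
print(TimeFrame.is_open_time(arrow.get("2020-1-3 14:59").naive))  # True, within trading minutes
```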
Returns: bool \"\"\" if tm is None : tm = cls . now () if not cls . is_trade_day ( tm ): return False minutes = tm . hour * 60 + tm . minute return 9 * 60 + 15 < minutes <= 9 * 60 + 25 @classmethod def is_closing_call_auction_time ( cls , tm : Union [ datetime . datetime , Arrow ] = None ) -> bool : \"\"\"\u5224\u65ad`tm`\u6307\u5b9a\u7684\u65f6\u95f4\u662f\u5426\u4e3a\u6536\u76d8\u96c6\u5408\u7ade\u4ef7\u65f6\u95f4 Fixme: \u6b64\u5904\u5b9e\u73b0\u6709\u8bef\uff0c\u6536\u76d8\u96c6\u5408\u7ade\u4ef7\u65f6\u95f4\u5e94\u8be5\u8fd8\u5305\u542b\u4e0a\u5348\u6536\u76d8\u65f6\u95f4 Args: tm : [description]. Defaults to None. Returns: bool \"\"\" tm = tm or cls . now () if not cls . is_trade_day ( tm ): return False minutes = tm . hour * 60 + tm . minute return 15 * 60 - 3 <= minutes < 15 * 60 @classmethod def floor ( cls , moment : Frame , frame_type : FrameType ) -> Frame : \"\"\"\u6c42`moment`\u5728\u6307\u5b9a\u7684`frame_type`\u4e2d\u7684\u4e0b\u754c \u6bd4\u5982\uff0c\u5982\u679c`moment`\u4e3a10:37\uff0c\u5219\u5f53`frame_type`\u4e3a30\u5206\u949f\u65f6\uff0c\u5bf9\u5e94\u7684\u4e0a\u754c\u4e3a10:00 Examples: >>> # \u5982\u679cmoment\u4e3a\u65e5\u671f\uff0c\u5219\u5f53\u6210\u5df2\u6536\u76d8\u5904\u7406 >>> TimeFrame.day_frames = np.array([20050104, 20050105, 20050106, 20050107, 20050110, 20050111]) >>> TimeFrame.floor(datetime.date(2005, 1, 7), FrameType.DAY) datetime.date(2005, 1, 7) >>> # moment\u6307\u5b9a\u7684\u65f6\u95f4\u8fd8\u672a\u6536\u76d8\uff0cfloor\u5230\u4e0a\u4e00\u4e2a\u4ea4\u6613\u65e5 >>> TimeFrame.floor(datetime.datetime(2005, 1, 7, 14, 59), FrameType.DAY) datetime.date(2005, 1, 6) >>> TimeFrame.floor(datetime.date(2005, 1, 13), FrameType.WEEK) datetime.date(2005, 1, 7) >>> TimeFrame.floor(datetime.date(2005,2, 27), FrameType.MONTH) datetime.date(2005, 1, 31) >>> TimeFrame.floor(datetime.datetime(2005,1,5,14,59), FrameType.MIN30) datetime.datetime(2005, 1, 5, 14, 30) >>> TimeFrame.floor(datetime.datetime(2005, 1, 5, 14, 59), FrameType.MIN1) datetime.datetime(2005, 1, 5, 14, 59) >>> TimeFrame.floor(arrow.get('2005-1-5 14:59').naive, FrameType.MIN1) datetime.datetime(2005, 1, 5, 14, 59) Args: moment: frame_type: Returns: `moment`\u5728\u6307\u5b9a\u7684`frame_type`\u4e2d\u7684\u4e0b\u754c \"\"\" if frame_type in cls . minute_level_frames : tm , day_offset = cls . minute_frames_floor ( cls . ticks [ frame_type ], moment . hour * 60 + moment . minute ) h , m = tm // 60 , tm % 60 if cls . day_shift ( moment , 0 ) < moment . date () or day_offset == - 1 : h = 15 m = 0 new_day = cls . day_shift ( moment , day_offset ) else : new_day = moment . date () return datetime . datetime ( new_day . year , new_day . month , new_day . day , h , m ) if type ( moment ) == datetime . date : moment = datetime . datetime ( moment . year , moment . month , moment . day , 15 ) # \u5982\u679c\u662f\u4ea4\u6613\u65e5\uff0c\u4f46\u8fd8\u672a\u6536\u76d8 if ( cls . date2int ( moment ) in cls . day_frames and moment . hour * 60 + moment . minute < 900 ): moment = cls . day_shift ( moment , - 1 ) day = cls . date2int ( moment ) if frame_type == FrameType . DAY : arr = cls . day_frames elif frame_type == FrameType . WEEK : arr = cls . week_frames elif frame_type == FrameType . MONTH : arr = cls . month_frames else : # pragma: no cover raise ValueError ( f \"frame type { frame_type } not supported.\" ) floored = ext . floor ( arr , day ) return cls . int2date ( floored ) @classmethod def last_min_frame ( cls , day : Union [ str , Arrow , datetime . 
date ], frame_type : FrameType ) -> Union [ datetime . date , datetime . datetime ]: \"\"\"\u83b7\u53d6`day`\u65e5\u5468\u671f\u4e3a`frame_type`\u7684\u7ed3\u675fframe\u3002 Example: >>> TimeFrame.last_min_frame(arrow.get('2020-1-5').date(), FrameType.MIN30) datetime.datetime(2020, 1, 3, 15, 0) Args: day: frame_type: Returns: `day`\u65e5\u5468\u671f\u4e3a`frame_type`\u7684\u7ed3\u675fframe \"\"\" if isinstance ( day , str ): day = cls . date2int ( arrow . get ( day ) . date ()) elif isinstance ( day , arrow . Arrow ) or isinstance ( day , datetime . datetime ): day = cls . date2int ( day . date ()) elif isinstance ( day , datetime . date ): day = cls . date2int ( day ) else : raise TypeError ( f \" { type ( day ) } is not supported.\" ) if frame_type in cls . minute_level_frames : last_close_day = cls . day_frames [ cls . day_frames <= day ][ - 1 ] day = cls . int2date ( last_close_day ) return datetime . datetime ( day . year , day . month , day . day , hour = 15 , minute = 0 ) else : # pragma: no cover raise ValueError ( f \" { frame_type } not supported\" ) @classmethod def frame_len ( cls , frame_type : FrameType ) -> int : \"\"\"\u8fd4\u56de\u4ee5\u5206\u949f\u4e3a\u5355\u4f4d\u7684frame\u957f\u5ea6\u3002 \u5bf9\u65e5\u7ebf\u4ee5\u4e0a\u7ea7\u522b\u6ca1\u6709\u610f\u4e49\uff0c\u4f46\u4f1a\u8fd4\u56de240 Examples: >>> TimeFrame.frame_len(FrameType.MIN5) 5 Args: frame_type: Returns: \u8fd4\u56de\u4ee5\u5206\u949f\u4e3a\u5355\u4f4d\u7684frame\u957f\u5ea6\u3002 \"\"\" if frame_type == FrameType . MIN1 : return 1 elif frame_type == FrameType . MIN5 : return 5 elif frame_type == FrameType . MIN15 : return 15 elif frame_type == FrameType . MIN30 : return 30 elif frame_type == FrameType . MIN60 : return 60 else : return 240 @classmethod def first_min_frame ( cls , day : Union [ str , Arrow , Frame ], frame_type : FrameType ) -> Union [ datetime . date , datetime . datetime ]: \"\"\"\u83b7\u53d6\u6307\u5b9a\u65e5\u671f\u7c7b\u578b\u4e3a`frame_type`\u7684`frame`\u3002 Examples: >>> TimeFrame.day_frames = np.array([20191227, 20191230, 20191231, 20200102, 20200103]) >>> TimeFrame.first_min_frame('2019-12-31', FrameType.MIN1) datetime.datetime(2019, 12, 31, 9, 31) Args: day: which day? frame_type: which frame_type? Returns: `day`\u5f53\u65e5\u7684\u7b2c\u4e00\u5e27 \"\"\" day = cls . date2int ( arrow . get ( day ) . date ()) if frame_type == FrameType . MIN1 : floor_day = cls . day_frames [ cls . day_frames <= day ][ - 1 ] day = cls . int2date ( floor_day ) return datetime . datetime ( day . year , day . month , day . day , hour = 9 , minute = 31 ) elif frame_type == FrameType . MIN5 : floor_day = cls . day_frames [ cls . day_frames <= day ][ - 1 ] day = cls . int2date ( floor_day ) return datetime . datetime ( day . year , day . month , day . day , hour = 9 , minute = 35 ) elif frame_type == FrameType . MIN15 : floor_day = cls . day_frames [ cls . day_frames <= day ][ - 1 ] day = cls . int2date ( floor_day ) return datetime . datetime ( day . year , day . month , day . day , hour = 9 , minute = 45 ) elif frame_type == FrameType . MIN30 : floor_day = cls . day_frames [ cls . day_frames <= day ][ - 1 ] day = cls . int2date ( floor_day ) return datetime . datetime ( day . year , day . month , day . day , hour = 10 ) elif frame_type == FrameType . MIN60 : floor_day = cls . day_frames [ cls . day_frames <= day ][ - 1 ] day = cls . int2date ( floor_day ) return datetime . datetime ( day . year , day . month , day . 
day , hour = 10 , minute = 30 ) else : # pragma: no cover raise ValueError ( f \" { frame_type } not supported\" ) @classmethod def get_frames ( cls , start : Frame , end : Frame , frame_type : FrameType ) -> List [ int ]: \"\"\"\u53d6[start, end]\u95f4\u6240\u6709\u7c7b\u578b\u4e3aframe_type\u7684frames \u8c03\u7528\u672c\u51fd\u6570\u524d\uff0c\u8bf7\u5148\u901a\u8fc7`floor`\u6216\u8005`ceiling`\u5c06\u65f6\u95f4\u5e27\u5bf9\u9f50\u5230`frame_type`\u7684\u8fb9\u754c\u503c Example: >>> start = arrow.get('2020-1-13 10:00').naive >>> end = arrow.get('2020-1-13 13:30').naive >>> TimeFrame.day_frames = np.array([20200109, 20200110, 20200113,20200114, 20200115, 20200116]) >>> TimeFrame.get_frames(start, end, FrameType.MIN30) [202001131000, 202001131030, 202001131100, 202001131130, 202001131330] Args: start: end: frame_type: Returns: frame list \"\"\" n = cls . count_frames ( start , end , frame_type ) return cls . get_frames_by_count ( end , n , frame_type ) @classmethod def get_frames_by_count ( cls , end : Arrow , n : int , frame_type : FrameType ) -> List [ int ]: \"\"\"\u53d6\u4ee5end\u4e3a\u7ed3\u675f\u70b9,\u5468\u671f\u4e3aframe_type\u7684n\u4e2aframe \u8c03\u7528\u524d\u8bf7\u5c06`end`\u5bf9\u9f50\u5230`frame_type`\u7684\u8fb9\u754c Examples: >>> end = arrow.get('2020-1-6 14:30').naive >>> TimeFrame.day_frames = np.array([20200102, 20200103,20200106, 20200107, 20200108, 20200109]) >>> TimeFrame.get_frames_by_count(end, 2, FrameType.MIN30) [202001061400, 202001061430] Args: end: n: frame_type: Returns: frame list \"\"\" if frame_type == FrameType . DAY : end = cls . date2int ( end ) pos = np . searchsorted ( cls . day_frames , end , side = \"right\" ) return cls . day_frames [ max ( 0 , pos - n ) : pos ] . tolist () elif frame_type == FrameType . WEEK : end = cls . date2int ( end ) pos = np . searchsorted ( cls . week_frames , end , side = \"right\" ) return cls . week_frames [ max ( 0 , pos - n ) : pos ] . tolist () elif frame_type == FrameType . MONTH : end = cls . date2int ( end ) pos = np . searchsorted ( cls . month_frames , end , side = \"right\" ) return cls . month_frames [ max ( 0 , pos - n ) : pos ] . tolist () elif frame_type in { FrameType . MIN1 , FrameType . MIN5 , FrameType . MIN15 , FrameType . MIN30 , FrameType . MIN60 , }: n_days = n // len ( cls . ticks [ frame_type ]) + 2 ticks = cls . ticks [ frame_type ] * n_days days = cls . get_frames_by_count ( end , n_days , FrameType . DAY ) days = np . repeat ( days , len ( cls . ticks [ frame_type ])) ticks = [ day . item () * 10000 + int ( tm / 60 ) * 100 + tm % 60 for day , tm in zip ( days , ticks ) ] # list index is much faster than ext.index_sorted when the arr is small pos = ticks . index ( cls . 
time2int ( end )) + 1 return ticks [ max ( 0 , pos - n ) : pos ] else : # pragma: no cover raise ValueError ( f \" { frame_type } not support yet\" ) @classmethod def ceiling ( cls , moment : Frame , frame_type : FrameType ) -> Frame : \"\"\"\u6c42`moment`\u6240\u5728\u7c7b\u578b\u4e3a`frame_type`\u5468\u671f\u7684\u4e0a\u754c \u6bd4\u5982`moment`\u4e3a14:59\u5206\uff0c\u5982\u679c`frame_type`\u4e3a30\u5206\u949f\uff0c\u5219\u5b83\u7684\u4e0a\u754c\u5e94\u8be5\u4e3a15:00 Example: >>> TimeFrame.day_frames = [20050104, 20050105, 20050106, 20050107] >>> TimeFrame.ceiling(datetime.date(2005, 1, 7), FrameType.DAY) datetime.date(2005, 1, 7) >>> TimeFrame.week_frames = [20050107, 20050114, 20050121, 20050128] >>> TimeFrame.ceiling(datetime.date(2005, 1, 4), FrameType.WEEK) datetime.date(2005, 1, 7) >>> TimeFrame.ceiling(datetime.date(2005,1,7), FrameType.WEEK) datetime.date(2005, 1, 7) >>> TimeFrame.month_frames = [20050131, 20050228] >>> TimeFrame.ceiling(datetime.date(2005,1 ,1), FrameType.MONTH) datetime.date(2005, 1, 31) >>> TimeFrame.ceiling(datetime.datetime(2005,1,5,14,59), FrameType.MIN30) datetime.datetime(2005, 1, 5, 15, 0) >>> TimeFrame.ceiling(datetime.datetime(2005, 1, 5, 14, 59), FrameType.MIN1) datetime.datetime(2005, 1, 5, 14, 59) >>> TimeFrame.ceiling(arrow.get('2005-1-5 14:59').naive, FrameType.MIN1) datetime.datetime(2005, 1, 5, 14, 59) Args: moment (datetime.datetime): [description] frame_type (FrameType): [description] Returns: `moment`\u6240\u5728\u7c7b\u578b\u4e3a`frame_type`\u5468\u671f\u7684\u4e0a\u754c \"\"\" if frame_type in cls . day_level_frames and type ( moment ) == datetime . datetime : moment = moment . date () floor = cls . floor ( moment , frame_type ) if floor == moment : return moment elif floor > moment : return floor else : return cls . shift ( floor , 1 , frame_type ) @classmethod def combine_time ( cls , date : datetime . date , hour : int , minute : int = 0 , second : int = 0 , microsecond : int = 0 , ) -> datetime . datetime : \"\"\"\u7528`date`\u6307\u5b9a\u7684\u65e5\u671f\u4e0e`hour`, `minute`, `second`\u7b49\u53c2\u6570\u4e00\u8d77\u5408\u6210\u65b0\u7684\u65f6\u95f4 Examples: >>> TimeFrame.combine_time(datetime.date(2020, 1, 1), 14, 30) datetime.datetime(2020, 1, 1, 14, 30) Args: date : [description] hour : [description] minute : [description]. Defaults to 0. second : [description]. Defaults to 0. microsecond : [description]. Defaults to 0. Returns: \u5408\u6210\u540e\u7684\u65f6\u95f4 \"\"\" return datetime . datetime ( date . year , date . month , date . day , hour , minute , second , microsecond ) @classmethod def replace_date ( cls , dtm : datetime . datetime , dt : datetime . date ) -> datetime . datetime : \"\"\"\u5c06`dtm`\u53d8\u91cf\u7684\u65e5\u671f\u66f4\u6362\u4e3a`dt`\u6307\u5b9a\u7684\u65e5\u671f Example: >>> TimeFrame.replace_date(arrow.get('2020-1-1 13:49').datetime, datetime.date(2019, 1,1)) datetime.datetime(2019, 1, 1, 13, 49) Args: dtm (datetime.datetime): [description] dt (datetime.date): [description] Returns: \u53d8\u6362\u540e\u7684\u65f6\u95f4 \"\"\" return datetime . datetime ( dt . year , dt . month , dt . day , dtm . hour , dtm . minute , dtm . second , dtm . microsecond ) @classmethod def resample_frames ( cls , trade_days : Iterable [ datetime . 
date ], frame_type : FrameType ) -> List [ int ]: \"\"\"\u5c06\u4ece\u884c\u60c5\u670d\u52a1\u5668\u83b7\u53d6\u7684\u4ea4\u6613\u65e5\u5386\u91cd\u91c7\u6837\uff0c\u751f\u6210\u5468\u5e27\u548c\u6708\u7ebf\u5e27 Args: trade_days (Iterable): [description] frame_type (FrameType): [description] Returns: List[int]: \u91cd\u91c7\u6837\u540e\u7684\u65e5\u671f\u5217\u8868\uff0c\u65e5\u671f\u7528\u6574\u6570\u8868\u793a \"\"\" if frame_type == FrameType . WEEK : weeks = [] last = trade_days [ 0 ] for cur in trade_days : if cur . weekday () < last . weekday () or ( cur - last ) . days >= 7 : weeks . append ( last ) last = cur if weeks [ - 1 ] < last : weeks . append ( last ) return weeks elif frame_type == FrameType . MONTH : months = [] last = trade_days [ 0 ] for cur in trade_days : if cur . day < last . day : months . append ( last ) last = cur months . append ( last ) return months elif frame_type == FrameType . QUARTER : quarters = [] last = trade_days [ 0 ] for cur in trade_days : if last . month % 3 == 0 : if cur . month > last . month or cur . year > last . year : quarters . append ( last ) last = cur quarters . append ( last ) return quarters elif frame_type == FrameType . YEAR : years = [] last = trade_days [ 0 ] for cur in trade_days : if cur . year > last . year : years . append ( last ) last = cur years . append ( last ) return years else : # pragma: no cover raise ValueError ( f \"Unsupported FrameType: { frame_type } \" ) @classmethod def minute_frames_floor ( cls , ticks , moment ) -> Tuple [ int , int ]: \"\"\" \u5bf9\u4e8e\u5206\u949f\u7ea7\u7684frame,\u8fd4\u56de\u5b83\u4eec\u4e0eframe\u523b\u5ea6\u5411\u4e0b\u5bf9\u9f50\u540e\u7684frame\u53ca\u65e5\u671f\u8fdb\u4f4d\u3002\u5982\u679c\u9700\u8981\u5bf9\u9f50\u5230\u4e0a\u4e00\u4e2a\u4ea4\u6613 \u65e5\uff0c\u5219\u8fdb\u4f4d\u4e3a-1\uff0c\u5426\u5219\u4e3a0. Examples: >>> ticks = [600, 630, 660, 690, 810, 840, 870, 900] >>> TimeFrame.minute_frames_floor(ticks, 545) (900, -1) >>> TimeFrame.minute_frames_floor(ticks, 600) (600, 0) >>> TimeFrame.minute_frames_floor(ticks, 605) (600, 0) >>> TimeFrame.minute_frames_floor(ticks, 899) (870, 0) >>> TimeFrame.minute_frames_floor(ticks, 900) (900, 0) >>> TimeFrame.minute_frames_floor(ticks, 905) (900, 0) Args: ticks (np.array or list): frames\u523b\u5ea6 moment (int): \u6574\u6570\u8868\u793a\u7684\u5206\u949f\u6570\uff0c\u6bd4\u5982900\u8868\u793a15\uff1a00 Returns: tuple, the first is the new moment, the second is carry-on \"\"\" if moment < ticks [ 0 ]: return ticks [ - 1 ], - 1 # \u2019right' \u76f8\u5f53\u4e8e ticks <= m index = np . searchsorted ( ticks , moment , side = \"right\" ) return ticks [ index - 1 ], 0 @classmethod async def save_calendar ( cls , trade_days ): # avoid circular import from omicron import cache for ft in [ FrameType . WEEK , FrameType . MONTH , FrameType . QUARTER , FrameType . YEAR ]: days = cls . resample_frames ( trade_days , ft ) frames = [ cls . date2int ( x ) for x in days ] key = f \"calendar: { ft . value } \" pl = cache . security . pipeline () pl . delete ( key ) pl . rpush ( key , * frames ) await pl . execute () frames = [ cls . date2int ( x ) for x in trade_days ] key = f \"calendar: { FrameType . DAY . value } \" pl = cache . security . pipeline () pl . delete ( key ) pl . rpush ( key , * frames ) await pl . execute () @classmethod async def remove_calendar ( cls ): # avoid circular import from omicron import cache for ft in cls . day_level_frames : key = f \"calendar: { ft . value } \" await cache . security . 
delete ( key ) @classmethod def is_bar_closed ( cls , frame : Frame , ft : FrameType ) -> bool : \"\"\"\u5224\u65ad`frame`\u6240\u4ee3\u8868\u7684bar\u662f\u5426\u5df2\u7ecf\u6536\u76d8\uff08\u7ed3\u675f\uff09 \u5982\u679c\u662f\u65e5\u7ebf\uff0cframe\u4e0d\u4e3a\u5f53\u5929\uff0c\u5219\u8ba4\u4e3a\u5df2\u6536\u76d8\uff1b\u6216\u8005\u5f53\u524d\u65f6\u95f4\u5728\u6536\u76d8\u65f6\u95f4\u4e4b\u540e\uff0c\u4e5f\u8ba4\u4e3a\u5df2\u6536\u76d8\u3002 \u5982\u679c\u662f\u5176\u5b83\u5468\u671f\uff0c\u5219\u53ea\u6709\u5f53frame\u6b63\u597d\u5728\u8fb9\u754c\u4e0a\uff0c\u624d\u8ba4\u4e3a\u662f\u5df2\u6536\u76d8\u3002\u8fd9\u91cc\u6709\u4e00\u4e2a\u5047\u8bbe\uff1a\u6211\u4eec\u4e0d\u4f1a\u5728\u5176\u5b83\u5468\u671f\u4e0a\uff0c\u5224\u65ad\u672a\u6765\u7684\u67d0\u4e2aframe\u662f\u5426\u5df2\u7ecf\u6536\u76d8\u3002 Args: frame : bar\u6240\u5904\u7684\u65f6\u95f4\uff0c\u5fc5\u987b\u5c0f\u4e8e\u5f53\u524d\u65f6\u95f4 ft: bar\u6240\u4ee3\u8868\u7684\u5e27\u7c7b\u578b Returns: bool: \u662f\u5426\u5df2\u7ecf\u6536\u76d8 \"\"\" floor = cls . floor ( frame , ft ) now = arrow . now () if ft == FrameType . DAY : return floor < now . date () or now . hour >= 15 else : return floor == frame @classmethod def get_frame_scope ( cls , frame : Frame , ft : FrameType ) -> Tuple [ Frame , Frame ]: # todo: \u51fd\u6570\u7684\u901a\u7528\u6027\u4e0d\u8db3\uff0c\u4f3c\u4e4e\u5e94\u8be5\u653e\u5728\u5177\u4f53\u7684\u4e1a\u52a1\u7c7b\u4e2d\u3002\u5982\u679c\u662f\u901a\u7528\u578b\u7684\u51fd\u6570\uff0c\u53c2\u6570\u4e0d\u5e94\u8be5\u5c40\u9650\u4e8e\u5468\u548c\u6708\u3002 \"\"\"\u5bf9\u4e8e\u7ed9\u5b9a\u7684\u65f6\u95f4\uff0c\u53d6\u6240\u5728\u5468\u7684\u7b2c\u4e00\u5929\u548c\u6700\u540e\u4e00\u5929\uff0c\u6240\u5728\u6708\u7684\u7b2c\u4e00\u5929\u548c\u6700\u540e\u4e00\u5929 Args: frame : \u6307\u5b9a\u7684\u65e5\u671f\uff0cdate\u5bf9\u8c61 ft: \u5e27\u7c7b\u578b\uff0c\u652f\u6301WEEK\u548cMONTH Returns: Tuple[Frame, Frame]: \u5468\u6216\u8005\u6708\u7684\u9996\u672b\u65e5\u671f\uff08date\u5bf9\u8c61\uff09 \"\"\" if frame is None : raise ValueError ( \"frame cannot be None\" ) if ft not in ( FrameType . WEEK , FrameType . MONTH ): raise ValueError ( f \"FrameType only supports WEEK and MONTH: { ft } \" ) if isinstance ( frame , datetime . datetime ): frame = frame . date () if frame < CALENDAR_START : raise ValueError ( f \"cannot be earlier than { CALENDAR_START } : { frame } \" ) # datetime.date(2021, 10, 8)\uff0c\u8fd9\u662f\u4e2a\u7279\u6b8a\u7684\u65e5\u671f if ft == FrameType . WEEK : if frame < datetime . date ( 2005 , 1 , 10 ): return datetime . date ( 2005 , 1 , 4 ), datetime . date ( 2005 , 1 , 7 ) if not cls . is_trade_day ( frame ): # \u975e\u4ea4\u6613\u65e5\u7684\u60c5\u51b5\uff0c\u76f4\u63a5\u56de\u9000\u4e00\u5929 week_day = cls . day_shift ( frame , 0 ) else : week_day = frame w1 = TimeFrame . floor ( week_day , FrameType . WEEK ) if w1 == week_day : # \u672c\u5468\u7684\u6700\u540e\u4e00\u4e2a\u4ea4\u6613\u65e5 week_end = w1 else : week_end = TimeFrame . week_shift ( week_day , 1 ) w0 = TimeFrame . week_shift ( week_end , - 1 ) week_start = TimeFrame . day_shift ( w0 , 1 ) return week_start , week_end if ft == FrameType . MONTH : if frame <= datetime . date ( 2005 , 1 , 31 ): return datetime . date ( 2005 , 1 , 4 ), datetime . date ( 2005 , 1 , 31 ) month_start = frame . replace ( day = 1 ) if not cls . is_trade_day ( month_start ): # \u975e\u4ea4\u6613\u65e5\u7684\u60c5\u51b5\uff0c\u76f4\u63a5\u52a01 month_start = cls . day_shift ( month_start , 1 ) month_end = TimeFrame . 
month_shift ( month_start , 1 ) return month_start , month_end @classmethod def get_previous_trade_day ( cls , now : datetime . date ): \"\"\"\u83b7\u53d6\u4e0a\u4e00\u4e2a\u4ea4\u6613\u65e5 \u5982\u679c\u5f53\u5929\u662f\u5468\u516d\u6216\u8005\u5468\u65e5\uff0c\u8fd4\u56de\u5468\u4e94\uff08\u4ea4\u6613\u65e5\uff09\uff0c\u5982\u679c\u5f53\u5929\u662f\u5468\u4e00\uff0c\u8fd4\u56de\u5468\u4e94\uff0c\u5982\u679c\u5f53\u5929\u662f\u5468\u4e94\uff0c\u8fd4\u56de\u5468\u56db Args: now : \u6307\u5b9a\u7684\u65e5\u671f\uff0cdate\u5bf9\u8c61 Returns: datetime.date: \u4e0a\u4e00\u4e2a\u4ea4\u6613\u65e5 \"\"\" if now == datetime . date ( 2005 , 1 , 4 ): return now if TimeFrame . is_trade_day ( now ): pre_trade_day = TimeFrame . day_shift ( now , - 1 ) else : pre_trade_day = TimeFrame . day_shift ( now , 0 ) return pre_trade_day ceiling ( moment , frame_type ) classmethod \u00b6 \u6c42 moment \u6240\u5728\u7c7b\u578b\u4e3a frame_type \u5468\u671f\u7684\u4e0a\u754c \u6bd4\u5982 moment \u4e3a14:59\u5206\uff0c\u5982\u679c frame_type \u4e3a30\u5206\u949f\uff0c\u5219\u5b83\u7684\u4e0a\u754c\u5e94\u8be5\u4e3a15:00 Examples: >>> TimeFrame . day_frames = [ 20050104 , 20050105 , 20050106 , 20050107 ] >>> TimeFrame . ceiling ( datetime . date ( 2005 , 1 , 7 ), FrameType . DAY ) datetime . date ( 2005 , 1 , 7 ) >>> TimeFrame . week_frames = [ 20050107 , 20050114 , 20050121 , 20050128 ] >>> TimeFrame . ceiling ( datetime . date ( 2005 , 1 , 4 ), FrameType . WEEK ) datetime . date ( 2005 , 1 , 7 ) >>> TimeFrame . ceiling ( datetime . date ( 2005 , 1 , 7 ), FrameType . WEEK ) datetime . date ( 2005 , 1 , 7 ) >>> TimeFrame . month_frames = [ 20050131 , 20050228 ] >>> TimeFrame . ceiling ( datetime . date ( 2005 , 1 , 1 ), FrameType . MONTH ) datetime . date ( 2005 , 1 , 31 ) >>> TimeFrame . ceiling ( datetime . datetime ( 2005 , 1 , 5 , 14 , 59 ), FrameType . MIN30 ) datetime . datetime ( 2005 , 1 , 5 , 15 , 0 ) >>> TimeFrame . ceiling ( datetime . datetime ( 2005 , 1 , 5 , 14 , 59 ), FrameType . MIN1 ) datetime . datetime ( 2005 , 1 , 5 , 14 , 59 ) >>> TimeFrame . ceiling ( arrow . get ( '2005-1-5 14:59' ) . naive , FrameType . MIN1 ) datetime . 
datetime ( 2005 , 1 , 5 , 14 , 59 ) Parameters: Name Type Description Default moment datetime.datetime [description] required frame_type FrameType [description] required Returns: Type Description Frame moment \u6240\u5728\u7c7b\u578b\u4e3a frame_type \u5468\u671f\u7684\u4e0a\u754c Source code in omicron/models/timeframe.py @classmethod def ceiling ( cls , moment : Frame , frame_type : FrameType ) -> Frame : \"\"\"\u6c42`moment`\u6240\u5728\u7c7b\u578b\u4e3a`frame_type`\u5468\u671f\u7684\u4e0a\u754c \u6bd4\u5982`moment`\u4e3a14:59\u5206\uff0c\u5982\u679c`frame_type`\u4e3a30\u5206\u949f\uff0c\u5219\u5b83\u7684\u4e0a\u754c\u5e94\u8be5\u4e3a15:00 Example: >>> TimeFrame.day_frames = [20050104, 20050105, 20050106, 20050107] >>> TimeFrame.ceiling(datetime.date(2005, 1, 7), FrameType.DAY) datetime.date(2005, 1, 7) >>> TimeFrame.week_frames = [20050107, 20050114, 20050121, 20050128] >>> TimeFrame.ceiling(datetime.date(2005, 1, 4), FrameType.WEEK) datetime.date(2005, 1, 7) >>> TimeFrame.ceiling(datetime.date(2005,1,7), FrameType.WEEK) datetime.date(2005, 1, 7) >>> TimeFrame.month_frames = [20050131, 20050228] >>> TimeFrame.ceiling(datetime.date(2005,1 ,1), FrameType.MONTH) datetime.date(2005, 1, 31) >>> TimeFrame.ceiling(datetime.datetime(2005,1,5,14,59), FrameType.MIN30) datetime.datetime(2005, 1, 5, 15, 0) >>> TimeFrame.ceiling(datetime.datetime(2005, 1, 5, 14, 59), FrameType.MIN1) datetime.datetime(2005, 1, 5, 14, 59) >>> TimeFrame.ceiling(arrow.get('2005-1-5 14:59').naive, FrameType.MIN1) datetime.datetime(2005, 1, 5, 14, 59) Args: moment (datetime.datetime): [description] frame_type (FrameType): [description] Returns: `moment`\u6240\u5728\u7c7b\u578b\u4e3a`frame_type`\u5468\u671f\u7684\u4e0a\u754c \"\"\" if frame_type in cls . day_level_frames and type ( moment ) == datetime . datetime : moment = moment . date () floor = cls . floor ( moment , frame_type ) if floor == moment : return moment elif floor > moment : return floor else : return cls . shift ( floor , 1 , frame_type ) combine_time ( date , hour , minute = 0 , second = 0 , microsecond = 0 ) classmethod \u00b6 \u7528 date \u6307\u5b9a\u7684\u65e5\u671f\u4e0e hour , minute , second \u7b49\u53c2\u6570\u4e00\u8d77\u5408\u6210\u65b0\u7684\u65f6\u95f4 Examples: >>> TimeFrame . combine_time ( datetime . date ( 2020 , 1 , 1 ), 14 , 30 ) datetime . datetime ( 2020 , 1 , 1 , 14 , 30 ) Parameters: Name Type Description Default date [description] required hour [description] required minute [description]. Defaults to 0. 0 second [description]. Defaults to 0. 0 microsecond [description]. Defaults to 0. 0 Returns: Type Description datetime.datetime \u5408\u6210\u540e\u7684\u65f6\u95f4 Source code in omicron/models/timeframe.py @classmethod def combine_time ( cls , date : datetime . date , hour : int , minute : int = 0 , second : int = 0 , microsecond : int = 0 , ) -> datetime . datetime : \"\"\"\u7528`date`\u6307\u5b9a\u7684\u65e5\u671f\u4e0e`hour`, `minute`, `second`\u7b49\u53c2\u6570\u4e00\u8d77\u5408\u6210\u65b0\u7684\u65f6\u95f4 Examples: >>> TimeFrame.combine_time(datetime.date(2020, 1, 1), 14, 30) datetime.datetime(2020, 1, 1, 14, 30) Args: date : [description] hour : [description] minute : [description]. Defaults to 0. second : [description]. Defaults to 0. microsecond : [description]. Defaults to 0. Returns: \u5408\u6210\u540e\u7684\u65f6\u95f4 \"\"\" return datetime . datetime ( date . year , date . month , date . 
day , hour , minute , second , microsecond ) count_day_frames ( start , end ) classmethod \u00b6 calc trade days between start and end in close-to-close way. if start == end, this will returns 1. Both start/end will be aligned to open trade day before calculation. Examples: >>> start = datetime . date ( 2019 , 12 , 21 ) >>> end = datetime . date ( 2019 , 12 , 21 ) >>> TimeFrame . day_frames = [ 20191219 , 20191220 , 20191223 , 20191224 , 20191225 ] >>> TimeFrame . count_day_frames ( start , end ) 1 >>> # non-trade days are removed >>> TimeFrame . day_frames = [ 20200121 , 20200122 , 20200123 , 20200203 , 20200204 , 20200205 ] >>> start = datetime . date ( 2020 , 1 , 23 ) >>> end = datetime . date ( 2020 , 2 , 4 ) >>> TimeFrame . count_day_frames ( start , end ) 3 Parameters: Name Type Description Default start Union[datetime.date, Arrow] required end Union[datetime.date, Arrow] required Returns: Type Description int count of days Source code in omicron/models/timeframe.py @classmethod def count_day_frames ( cls , start : Union [ datetime . date , Arrow ], end : Union [ datetime . date , Arrow ] ) -> int : \"\"\"calc trade days between start and end in close-to-close way. if start == end, this will returns 1. Both start/end will be aligned to open trade day before calculation. Examples: >>> start = datetime.date(2019, 12, 21) >>> end = datetime.date(2019, 12, 21) >>> TimeFrame.day_frames = [20191219, 20191220, 20191223, 20191224, 20191225] >>> TimeFrame.count_day_frames(start, end) 1 >>> # non-trade days are removed >>> TimeFrame.day_frames = [20200121, 20200122, 20200123, 20200203, 20200204, 20200205] >>> start = datetime.date(2020, 1, 23) >>> end = datetime.date(2020, 2, 4) >>> TimeFrame.count_day_frames(start, end) 3 args: start: end: returns: count of days \"\"\" start = cls . date2int ( start ) end = cls . date2int ( end ) return int ( ext . count_between ( cls . day_frames , start , end )) count_frames ( start , end , frame_type ) classmethod \u00b6 \u8ba1\u7b97start\u4e0eend\u4e4b\u95f4\u6709\u591a\u5c11\u4e2a\u5468\u671f\u4e3aframe_type\u7684frames See also: count_day_frames count_week_frames count_month_frames Parameters: Name Type Description Default start start frame required end end frame required frame_type the type of frame required Exceptions: Type Description ValueError \u5982\u679cframe_type\u4e0d\u652f\u6301\uff0c\u5219\u4f1a\u629b\u51fa\u6b64\u5f02\u5e38\u3002 Returns: Type Description int \u4ecestart\u5230end\u7684\u5e27\u6570 Source code in omicron/models/timeframe.py @classmethod def count_frames ( cls , start : Union [ datetime . date , datetime . datetime , Arrow ], end : Union [ datetime . date , datetime . datetime , Arrow ], frame_type , ) -> int : \"\"\"\u8ba1\u7b97start\u4e0eend\u4e4b\u95f4\u6709\u591a\u5c11\u4e2a\u5468\u671f\u4e3aframe_type\u7684frames See also: - [count_day_frames][omicron.models.timeframe.TimeFrame.count_day_frames] - [count_week_frames][omicron.models.timeframe.TimeFrame.count_week_frames] - [count_month_frames][omicron.models.timeframe.TimeFrame.count_month_frames] Args: start : start frame end : end frame frame_type : the type of frame Raises: ValueError: \u5982\u679cframe_type\u4e0d\u652f\u6301\uff0c\u5219\u4f1a\u629b\u51fa\u6b64\u5f02\u5e38\u3002 Returns: \u4ecestart\u5230end\u7684\u5e27\u6570 \"\"\" if frame_type == FrameType . DAY : return cls . count_day_frames ( start , end ) elif frame_type == FrameType . WEEK : return cls . count_week_frames ( start , end ) elif frame_type == FrameType . MONTH : return cls . 
count_month_frames ( start , end ) elif frame_type == FrameType . QUARTER : return cls . count_quarter_frames ( start , end ) elif frame_type == FrameType . YEAR : return cls . count_year_frames ( start , end ) elif frame_type in [ FrameType . MIN1 , FrameType . MIN5 , FrameType . MIN15 , FrameType . MIN30 , FrameType . MIN60 , ]: tm_start = start . hour * 60 + start . minute tm_end = end . hour * 60 + end . minute days = cls . count_day_frames ( start . date (), end . date ()) - 1 tm_start_pos = cls . ticks [ frame_type ] . index ( tm_start ) tm_end_pos = cls . ticks [ frame_type ] . index ( tm_end ) min_bars = tm_end_pos - tm_start_pos + 1 return days * len ( cls . ticks [ frame_type ]) + min_bars else : # pragma: no cover raise ValueError ( f \" { frame_type } is not supported yet\" ) count_month_frames ( start , end ) classmethod \u00b6 calc trade months between start and end date in close-to-close way Both start and end will be aligned to open trade day before calculation. After that, if start == end, this will returns 1. For examples, please refer to count_day_frames Parameters: Name Type Description Default start datetime.date required end datetime.date required Returns: Type Description int months between start and end Source code in omicron/models/timeframe.py @classmethod def count_month_frames ( cls , start : datetime . date , end : datetime . date ) -> int : \"\"\"calc trade months between start and end date in close-to-close way Both start and end will be aligned to open trade day before calculation. After that, if start == end, this will returns 1. For examples, please refer to [count_day_frames][omicron.models.timeframe.TimeFrame.count_day_frames] Args: start: end: Returns: months between start and end \"\"\" start = cls . date2int ( start ) end = cls . date2int ( end ) return int ( ext . count_between ( cls . month_frames , start , end )) count_quarter_frames ( start , end ) classmethod \u00b6 calc trade quarters between start and end date in close-to-close way Both start and end will be aligned to open trade day before calculation. After that, if start == end, this will returns 1. For examples, please refer to count_day_frames Parameters: Name Type Description Default start datetime.date [description] required end datetime.date [description] required Returns: Type Description int quarters between start and end Source code in omicron/models/timeframe.py @classmethod def count_quarter_frames ( cls , start : datetime . date , end : datetime . date ) -> int : \"\"\"calc trade quarters between start and end date in close-to-close way Both start and end will be aligned to open trade day before calculation. After that, if start == end, this will returns 1. For examples, please refer to [count_day_frames][omicron.models.timeframe.TimeFrame.count_day_frames] Args: start (datetime.date): [description] end (datetime.date): [description] Returns: quarters between start and end \"\"\" start = cls . date2int ( start ) end = cls . date2int ( end ) return int ( ext . count_between ( cls . quarter_frames , start , end )) count_week_frames ( start , end ) classmethod \u00b6 calc trade weeks between start and end in close-to-close way. Both start and end will be aligned to open trade day before calculation. 
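A minimal usage sketch of count_frames dispatching to the day-level counter (the calendar values and the expected result 3 reuse the count_day_frames doctest above; assuming numpy is imported as np and datetime is imported, as in the other examples): >>> TimeFrame.day_frames = np.array([20200121, 20200122, 20200123, 20200203, 20200204, 20200205]) >>> TimeFrame.count_frames(datetime.date(2020, 1, 23), datetime.date(2020, 2, 4), FrameType.DAY) 3 For minute-level frame types, align start and end to the frame boundary first (via floor or ceiling), since the minute branch looks both times up in cls.ticks[frame_type] and unaligned times are not found there.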
After that, if start == end, this will returns 1 for examples, please refer to count_day_frames Parameters: Name Type Description Default start datetime.date required end datetime.date required Returns: Type Description int count of weeks Source code in omicron/models/timeframe.py @classmethod def count_week_frames ( cls , start : datetime . date , end : datetime . date ) -> int : \"\"\" calc trade weeks between start and end in close-to-close way. Both start and end will be aligned to open trade day before calculation. After that, if start == end, this will returns 1 for examples, please refer to [count_day_frames][omicron.models.timeframe.TimeFrame.count_day_frames] args: start: end: returns: count of weeks \"\"\" start = cls . date2int ( start ) end = cls . date2int ( end ) return int ( ext . count_between ( cls . week_frames , start , end )) count_year_frames ( start , end ) classmethod \u00b6 calc trade years between start and end date in close-to-close way Both start and end will be aligned to open trade day before calculation. After that, if start == end, this will returns 1. For examples, please refer to count_day_frames Parameters: Name Type Description Default start datetime.date [description] required end datetime.date [description] required Returns: Type Description int years between start and end Source code in omicron/models/timeframe.py @classmethod def count_year_frames ( cls , start : datetime . date , end : datetime . date ) -> int : \"\"\"calc trade years between start and end date in close-to-close way Both start and end will be aligned to open trade day before calculation. After that, if start == end, this will returns 1. For examples, please refer to [count_day_frames][omicron.models.timeframe.TimeFrame.count_day_frames] Args: start (datetime.date): [description] end (datetime.date): [description] Returns: years between start and end \"\"\" start = cls . date2int ( start ) end = cls . date2int ( end ) return int ( ext . count_between ( cls . year_frames , start , end )) date2int ( d ) classmethod \u00b6 \u5c06\u65e5\u671f\u8f6c\u6362\u4e3a\u6574\u6570\u8868\u793a \u5728zillionare\u4e2d\uff0c\u5982\u679c\u8981\u5bf9\u65f6\u95f4\u548c\u65e5\u671f\u8fdb\u884c\u6301\u4e45\u5316\u64cd\u4f5c\uff0c\u6211\u4eec\u4e00\u822c\u5c06\u5176\u8f6c\u6362\u4e3aint\u7c7b\u578b Examples: >>> TimeFrame . date2int ( datetime . date ( 2020 , 5 , 1 )) 20200501 Parameters: Name Type Description Default d Union[datetime.datetime, datetime.date, Arrow] date required Returns: Type Description int \u65e5\u671f\u7684\u6574\u6570\u8868\u793a\uff0c\u6bd4\u598220220211 Source code in omicron/models/timeframe.py @classmethod def date2int ( cls , d : Union [ datetime . datetime , datetime . date , Arrow ]) -> int : \"\"\"\u5c06\u65e5\u671f\u8f6c\u6362\u4e3a\u6574\u6570\u8868\u793a \u5728zillionare\u4e2d\uff0c\u5982\u679c\u8981\u5bf9\u65f6\u95f4\u548c\u65e5\u671f\u8fdb\u884c\u6301\u4e45\u5316\u64cd\u4f5c\uff0c\u6211\u4eec\u4e00\u822c\u5c06\u5176\u8f6c\u6362\u4e3aint\u7c7b\u578b Examples: >>> TimeFrame.date2int(datetime.date(2020,5,1)) 20200501 Args: d: date Returns: \u65e5\u671f\u7684\u6574\u6570\u8868\u793a\uff0c\u6bd4\u598220220211 \"\"\" return int ( f \" { d . year : 04 }{ d . month : 02 }{ d . 
day : 02 } \" ) day_shift ( start , offset ) classmethod \u00b6 \u5bf9\u6307\u5b9a\u65e5\u671f\u8fdb\u884c\u524d\u540e\u79fb\u4f4d\u64cd\u4f5c \u5982\u679c n == 0\uff0c\u5219\u8fd4\u56ded\u5bf9\u5e94\u7684\u4ea4\u6613\u65e5\uff08\u5982\u679c\u662f\u975e\u4ea4\u6613\u65e5\uff0c\u5219\u8fd4\u56de\u521a\u7ed3\u675f\u7684\u4e00\u4e2a\u4ea4\u6613\u65e5\uff09 \u5982\u679c n > 0\uff0c\u5219\u8fd4\u56ded\u5bf9\u5e94\u7684\u4ea4\u6613\u65e5\u540e\u7b2c n \u4e2a\u4ea4\u6613\u65e5 \u5982\u679c n < 0\uff0c\u5219\u8fd4\u56ded\u5bf9\u5e94\u7684\u4ea4\u6613\u65e5\u524d\u7b2c n \u4e2a\u4ea4\u6613\u65e5 Examples: >>> TimeFrame . day_frames = [ 20191212 , 20191213 , 20191216 , 20191217 , 20191218 , 20191219 ] >>> TimeFrame . day_shift ( datetime . date ( 2019 , 12 , 13 ), 0 ) datetime . date ( 2019 , 12 , 13 ) >>> TimeFrame . day_shift ( datetime . date ( 2019 , 12 , 15 ), 0 ) datetime . date ( 2019 , 12 , 13 ) >>> TimeFrame . day_shift ( datetime . date ( 2019 , 12 , 15 ), 1 ) datetime . date ( 2019 , 12 , 16 ) >>> TimeFrame . day_shift ( datetime . date ( 2019 , 12 , 13 ), 1 ) datetime . date ( 2019 , 12 , 16 ) Parameters: Name Type Description Default start datetime.date the origin day required offset int days to shift, can be negative required Returns: Type Description datetime.date \u79fb\u4f4d\u540e\u7684\u65e5\u671f Source code in omicron/models/timeframe.py @classmethod def day_shift ( cls , start : datetime . date , offset : int ) -> datetime . date : \"\"\"\u5bf9\u6307\u5b9a\u65e5\u671f\u8fdb\u884c\u524d\u540e\u79fb\u4f4d\u64cd\u4f5c \u5982\u679c n == 0\uff0c\u5219\u8fd4\u56ded\u5bf9\u5e94\u7684\u4ea4\u6613\u65e5\uff08\u5982\u679c\u662f\u975e\u4ea4\u6613\u65e5\uff0c\u5219\u8fd4\u56de\u521a\u7ed3\u675f\u7684\u4e00\u4e2a\u4ea4\u6613\u65e5\uff09 \u5982\u679c n > 0\uff0c\u5219\u8fd4\u56ded\u5bf9\u5e94\u7684\u4ea4\u6613\u65e5\u540e\u7b2c n \u4e2a\u4ea4\u6613\u65e5 \u5982\u679c n < 0\uff0c\u5219\u8fd4\u56ded\u5bf9\u5e94\u7684\u4ea4\u6613\u65e5\u524d\u7b2c n \u4e2a\u4ea4\u6613\u65e5 Examples: >>> TimeFrame.day_frames = [20191212, 20191213, 20191216, 20191217,20191218, 20191219] >>> TimeFrame.day_shift(datetime.date(2019,12,13), 0) datetime.date(2019, 12, 13) >>> TimeFrame.day_shift(datetime.date(2019, 12, 15), 0) datetime.date(2019, 12, 13) >>> TimeFrame.day_shift(datetime.date(2019, 12, 15), 1) datetime.date(2019, 12, 16) >>> TimeFrame.day_shift(datetime.date(2019, 12, 13), 1) datetime.date(2019, 12, 16) Args: start: the origin day offset: days to shift, can be negative Returns: \u79fb\u4f4d\u540e\u7684\u65e5\u671f \"\"\" # accelerated from 0.12 to 0.07, per 10000 loop, type conversion time included start = cls . date2int ( start ) return cls . int2date ( ext . shift ( cls . day_frames , start , offset )) first_min_frame ( day , frame_type ) classmethod \u00b6 \u83b7\u53d6\u6307\u5b9a\u65e5\u671f\u7c7b\u578b\u4e3a frame_type \u7684 frame \u3002 Examples: >>> TimeFrame . day_frames = np . array ([ 20191227 , 20191230 , 20191231 , 20200102 , 20200103 ]) >>> TimeFrame . first_min_frame ( '2019-12-31' , FrameType . MIN1 ) datetime . datetime ( 2019 , 12 , 31 , 9 , 31 ) Parameters: Name Type Description Default day Union[str, Arrow, Frame] which day? required frame_type FrameType which frame_type? required Returns: Type Description Union[datetime.date, datetime.datetime] day \u5f53\u65e5\u7684\u7b2c\u4e00\u5e27 Source code in omicron/models/timeframe.py @classmethod def first_min_frame ( cls , day : Union [ str , Arrow , Frame ], frame_type : FrameType ) -> Union [ datetime . date , datetime . 
datetime ]: \"\"\"\u83b7\u53d6\u6307\u5b9a\u65e5\u671f\u7c7b\u578b\u4e3a`frame_type`\u7684`frame`\u3002 Examples: >>> TimeFrame.day_frames = np.array([20191227, 20191230, 20191231, 20200102, 20200103]) >>> TimeFrame.first_min_frame('2019-12-31', FrameType.MIN1) datetime.datetime(2019, 12, 31, 9, 31) Args: day: which day? frame_type: which frame_type? Returns: `day`\u5f53\u65e5\u7684\u7b2c\u4e00\u5e27 \"\"\" day = cls . date2int ( arrow . get ( day ) . date ()) if frame_type == FrameType . MIN1 : floor_day = cls . day_frames [ cls . day_frames <= day ][ - 1 ] day = cls . int2date ( floor_day ) return datetime . datetime ( day . year , day . month , day . day , hour = 9 , minute = 31 ) elif frame_type == FrameType . MIN5 : floor_day = cls . day_frames [ cls . day_frames <= day ][ - 1 ] day = cls . int2date ( floor_day ) return datetime . datetime ( day . year , day . month , day . day , hour = 9 , minute = 35 ) elif frame_type == FrameType . MIN15 : floor_day = cls . day_frames [ cls . day_frames <= day ][ - 1 ] day = cls . int2date ( floor_day ) return datetime . datetime ( day . year , day . month , day . day , hour = 9 , minute = 45 ) elif frame_type == FrameType . MIN30 : floor_day = cls . day_frames [ cls . day_frames <= day ][ - 1 ] day = cls . int2date ( floor_day ) return datetime . datetime ( day . year , day . month , day . day , hour = 10 ) elif frame_type == FrameType . MIN60 : floor_day = cls . day_frames [ cls . day_frames <= day ][ - 1 ] day = cls . int2date ( floor_day ) return datetime . datetime ( day . year , day . month , day . day , hour = 10 , minute = 30 ) else : # pragma: no cover raise ValueError ( f \" { frame_type } not supported\" ) floor ( moment , frame_type ) classmethod \u00b6 \u6c42 moment \u5728\u6307\u5b9a\u7684 frame_type \u4e2d\u7684\u4e0b\u754c \u6bd4\u5982\uff0c\u5982\u679c moment \u4e3a10:37\uff0c\u5219\u5f53 frame_type \u4e3a30\u5206\u949f\u65f6\uff0c\u5bf9\u5e94\u7684\u4e0a\u754c\u4e3a10:00 Examples: >>> # \u5982\u679cmoment\u4e3a\u65e5\u671f\uff0c\u5219\u5f53\u6210\u5df2\u6536\u76d8\u5904\u7406 >>> TimeFrame . day_frames = np . array ([ 20050104 , 20050105 , 20050106 , 20050107 , 20050110 , 20050111 ]) >>> TimeFrame . floor ( datetime . date ( 2005 , 1 , 7 ), FrameType . DAY ) datetime . date ( 2005 , 1 , 7 ) >>> # moment\u6307\u5b9a\u7684\u65f6\u95f4\u8fd8\u672a\u6536\u76d8\uff0cfloor\u5230\u4e0a\u4e00\u4e2a\u4ea4\u6613\u65e5 >>> TimeFrame . floor ( datetime . datetime ( 2005 , 1 , 7 , 14 , 59 ), FrameType . DAY ) datetime . date ( 2005 , 1 , 6 ) >>> TimeFrame . floor ( datetime . date ( 2005 , 1 , 13 ), FrameType . WEEK ) datetime . date ( 2005 , 1 , 7 ) >>> TimeFrame . floor ( datetime . date ( 2005 , 2 , 27 ), FrameType . MONTH ) datetime . date ( 2005 , 1 , 31 ) >>> TimeFrame . floor ( datetime . datetime ( 2005 , 1 , 5 , 14 , 59 ), FrameType . MIN30 ) datetime . datetime ( 2005 , 1 , 5 , 14 , 30 ) >>> TimeFrame . floor ( datetime . datetime ( 2005 , 1 , 5 , 14 , 59 ), FrameType . MIN1 ) datetime . datetime ( 2005 , 1 , 5 , 14 , 59 ) >>> TimeFrame . floor ( arrow . get ( '2005-1-5 14:59' ) . naive , FrameType . MIN1 ) datetime . 
datetime ( 2005 , 1 , 5 , 14 , 59 ) Parameters: Name Type Description Default moment Frame required frame_type FrameType required Returns: Type Description Frame moment \u5728\u6307\u5b9a\u7684 frame_type \u4e2d\u7684\u4e0b\u754c Source code in omicron/models/timeframe.py @classmethod def floor ( cls , moment : Frame , frame_type : FrameType ) -> Frame : \"\"\"\u6c42`moment`\u5728\u6307\u5b9a\u7684`frame_type`\u4e2d\u7684\u4e0b\u754c \u6bd4\u5982\uff0c\u5982\u679c`moment`\u4e3a10:37\uff0c\u5219\u5f53`frame_type`\u4e3a30\u5206\u949f\u65f6\uff0c\u5bf9\u5e94\u7684\u4e0a\u754c\u4e3a10:00 Examples: >>> # \u5982\u679cmoment\u4e3a\u65e5\u671f\uff0c\u5219\u5f53\u6210\u5df2\u6536\u76d8\u5904\u7406 >>> TimeFrame.day_frames = np.array([20050104, 20050105, 20050106, 20050107, 20050110, 20050111]) >>> TimeFrame.floor(datetime.date(2005, 1, 7), FrameType.DAY) datetime.date(2005, 1, 7) >>> # moment\u6307\u5b9a\u7684\u65f6\u95f4\u8fd8\u672a\u6536\u76d8\uff0cfloor\u5230\u4e0a\u4e00\u4e2a\u4ea4\u6613\u65e5 >>> TimeFrame.floor(datetime.datetime(2005, 1, 7, 14, 59), FrameType.DAY) datetime.date(2005, 1, 6) >>> TimeFrame.floor(datetime.date(2005, 1, 13), FrameType.WEEK) datetime.date(2005, 1, 7) >>> TimeFrame.floor(datetime.date(2005,2, 27), FrameType.MONTH) datetime.date(2005, 1, 31) >>> TimeFrame.floor(datetime.datetime(2005,1,5,14,59), FrameType.MIN30) datetime.datetime(2005, 1, 5, 14, 30) >>> TimeFrame.floor(datetime.datetime(2005, 1, 5, 14, 59), FrameType.MIN1) datetime.datetime(2005, 1, 5, 14, 59) >>> TimeFrame.floor(arrow.get('2005-1-5 14:59').naive, FrameType.MIN1) datetime.datetime(2005, 1, 5, 14, 59) Args: moment: frame_type: Returns: `moment`\u5728\u6307\u5b9a\u7684`frame_type`\u4e2d\u7684\u4e0b\u754c \"\"\" if frame_type in cls . minute_level_frames : tm , day_offset = cls . minute_frames_floor ( cls . ticks [ frame_type ], moment . hour * 60 + moment . minute ) h , m = tm // 60 , tm % 60 if cls . day_shift ( moment , 0 ) < moment . date () or day_offset == - 1 : h = 15 m = 0 new_day = cls . day_shift ( moment , day_offset ) else : new_day = moment . date () return datetime . datetime ( new_day . year , new_day . month , new_day . day , h , m ) if type ( moment ) == datetime . date : moment = datetime . datetime ( moment . year , moment . month , moment . day , 15 ) # \u5982\u679c\u662f\u4ea4\u6613\u65e5\uff0c\u4f46\u8fd8\u672a\u6536\u76d8 if ( cls . date2int ( moment ) in cls . day_frames and moment . hour * 60 + moment . minute < 900 ): moment = cls . day_shift ( moment , - 1 ) day = cls . date2int ( moment ) if frame_type == FrameType . DAY : arr = cls . day_frames elif frame_type == FrameType . WEEK : arr = cls . week_frames elif frame_type == FrameType . MONTH : arr = cls . month_frames else : # pragma: no cover raise ValueError ( f \"frame type { frame_type } not supported.\" ) floored = ext . floor ( arr , day ) return cls . int2date ( floored ) frame_len ( frame_type ) classmethod \u00b6 \u8fd4\u56de\u4ee5\u5206\u949f\u4e3a\u5355\u4f4d\u7684frame\u957f\u5ea6\u3002 \u5bf9\u65e5\u7ebf\u4ee5\u4e0a\u7ea7\u522b\u6ca1\u6709\u610f\u4e49\uff0c\u4f46\u4f1a\u8fd4\u56de240 Examples: >>> TimeFrame . frame_len ( FrameType . 
MIN5 ) 5 Parameters: Name Type Description Default frame_type FrameType required Returns: Type Description int \u8fd4\u56de\u4ee5\u5206\u949f\u4e3a\u5355\u4f4d\u7684frame\u957f\u5ea6\u3002 Source code in omicron/models/timeframe.py @classmethod def frame_len ( cls , frame_type : FrameType ) -> int : \"\"\"\u8fd4\u56de\u4ee5\u5206\u949f\u4e3a\u5355\u4f4d\u7684frame\u957f\u5ea6\u3002 \u5bf9\u65e5\u7ebf\u4ee5\u4e0a\u7ea7\u522b\u6ca1\u6709\u610f\u4e49\uff0c\u4f46\u4f1a\u8fd4\u56de240 Examples: >>> TimeFrame.frame_len(FrameType.MIN5) 5 Args: frame_type: Returns: \u8fd4\u56de\u4ee5\u5206\u949f\u4e3a\u5355\u4f4d\u7684frame\u957f\u5ea6\u3002 \"\"\" if frame_type == FrameType . MIN1 : return 1 elif frame_type == FrameType . MIN5 : return 5 elif frame_type == FrameType . MIN15 : return 15 elif frame_type == FrameType . MIN30 : return 30 elif frame_type == FrameType . MIN60 : return 60 else : return 240 get_frame_scope ( frame , ft ) classmethod \u00b6 \u5bf9\u4e8e\u7ed9\u5b9a\u7684\u65f6\u95f4\uff0c\u53d6\u6240\u5728\u5468\u7684\u7b2c\u4e00\u5929\u548c\u6700\u540e\u4e00\u5929\uff0c\u6240\u5728\u6708\u7684\u7b2c\u4e00\u5929\u548c\u6700\u540e\u4e00\u5929 Parameters: Name Type Description Default frame \u6307\u5b9a\u7684\u65e5\u671f\uff0cdate\u5bf9\u8c61 required ft FrameType \u5e27\u7c7b\u578b\uff0c\u652f\u6301WEEK\u548cMONTH required Returns: Type Description Tuple[Frame, Frame] \u5468\u6216\u8005\u6708\u7684\u9996\u672b\u65e5\u671f\uff08date\u5bf9\u8c61\uff09 Source code in omicron/models/timeframe.py @classmethod def get_frame_scope ( cls , frame : Frame , ft : FrameType ) -> Tuple [ Frame , Frame ]: # todo: \u51fd\u6570\u7684\u901a\u7528\u6027\u4e0d\u8db3\uff0c\u4f3c\u4e4e\u5e94\u8be5\u653e\u5728\u5177\u4f53\u7684\u4e1a\u52a1\u7c7b\u4e2d\u3002\u5982\u679c\u662f\u901a\u7528\u578b\u7684\u51fd\u6570\uff0c\u53c2\u6570\u4e0d\u5e94\u8be5\u5c40\u9650\u4e8e\u5468\u548c\u6708\u3002 \"\"\"\u5bf9\u4e8e\u7ed9\u5b9a\u7684\u65f6\u95f4\uff0c\u53d6\u6240\u5728\u5468\u7684\u7b2c\u4e00\u5929\u548c\u6700\u540e\u4e00\u5929\uff0c\u6240\u5728\u6708\u7684\u7b2c\u4e00\u5929\u548c\u6700\u540e\u4e00\u5929 Args: frame : \u6307\u5b9a\u7684\u65e5\u671f\uff0cdate\u5bf9\u8c61 ft: \u5e27\u7c7b\u578b\uff0c\u652f\u6301WEEK\u548cMONTH Returns: Tuple[Frame, Frame]: \u5468\u6216\u8005\u6708\u7684\u9996\u672b\u65e5\u671f\uff08date\u5bf9\u8c61\uff09 \"\"\" if frame is None : raise ValueError ( \"frame cannot be None\" ) if ft not in ( FrameType . WEEK , FrameType . MONTH ): raise ValueError ( f \"FrameType only supports WEEK and MONTH: { ft } \" ) if isinstance ( frame , datetime . datetime ): frame = frame . date () if frame < CALENDAR_START : raise ValueError ( f \"cannot be earlier than { CALENDAR_START } : { frame } \" ) # datetime.date(2021, 10, 8)\uff0c\u8fd9\u662f\u4e2a\u7279\u6b8a\u7684\u65e5\u671f if ft == FrameType . WEEK : if frame < datetime . date ( 2005 , 1 , 10 ): return datetime . date ( 2005 , 1 , 4 ), datetime . date ( 2005 , 1 , 7 ) if not cls . is_trade_day ( frame ): # \u975e\u4ea4\u6613\u65e5\u7684\u60c5\u51b5\uff0c\u76f4\u63a5\u56de\u9000\u4e00\u5929 week_day = cls . day_shift ( frame , 0 ) else : week_day = frame w1 = TimeFrame . floor ( week_day , FrameType . WEEK ) if w1 == week_day : # \u672c\u5468\u7684\u6700\u540e\u4e00\u4e2a\u4ea4\u6613\u65e5 week_end = w1 else : week_end = TimeFrame . week_shift ( week_day , 1 ) w0 = TimeFrame . week_shift ( week_end , - 1 ) week_start = TimeFrame . day_shift ( w0 , 1 ) return week_start , week_end if ft == FrameType . MONTH : if frame <= datetime . 
date ( 2005 , 1 , 31 ): return datetime . date ( 2005 , 1 , 4 ), datetime . date ( 2005 , 1 , 31 ) month_start = frame . replace ( day = 1 ) if not cls . is_trade_day ( month_start ): # \u975e\u4ea4\u6613\u65e5\u7684\u60c5\u51b5\uff0c\u76f4\u63a5\u52a01 month_start = cls . day_shift ( month_start , 1 ) month_end = TimeFrame . month_shift ( month_start , 1 ) return month_start , month_end get_frames ( start , end , frame_type ) classmethod \u00b6 \u53d6[start, end]\u95f4\u6240\u6709\u7c7b\u578b\u4e3aframe_type\u7684frames \u8c03\u7528\u672c\u51fd\u6570\u524d\uff0c\u8bf7\u5148\u901a\u8fc7 floor \u6216\u8005 ceiling \u5c06\u65f6\u95f4\u5e27\u5bf9\u9f50\u5230 frame_type \u7684\u8fb9\u754c\u503c Examples: >>> start = arrow . get ( '2020-1-13 10:00' ) . naive >>> end = arrow . get ( '2020-1-13 13:30' ) . naive >>> TimeFrame . day_frames = np . array ([ 20200109 , 20200110 , 20200113 , 20200114 , 20200115 , 20200116 ]) >>> TimeFrame . get_frames ( start , end , FrameType . MIN30 ) [ 202001131000 , 202001131030 , 202001131100 , 202001131130 , 202001131330 ] Parameters: Name Type Description Default start Frame required end Frame required frame_type FrameType required Returns: Type Description List[int] frame list Source code in omicron/models/timeframe.py @classmethod def get_frames ( cls , start : Frame , end : Frame , frame_type : FrameType ) -> List [ int ]: \"\"\"\u53d6[start, end]\u95f4\u6240\u6709\u7c7b\u578b\u4e3aframe_type\u7684frames \u8c03\u7528\u672c\u51fd\u6570\u524d\uff0c\u8bf7\u5148\u901a\u8fc7`floor`\u6216\u8005`ceiling`\u5c06\u65f6\u95f4\u5e27\u5bf9\u9f50\u5230`frame_type`\u7684\u8fb9\u754c\u503c Example: >>> start = arrow.get('2020-1-13 10:00').naive >>> end = arrow.get('2020-1-13 13:30').naive >>> TimeFrame.day_frames = np.array([20200109, 20200110, 20200113,20200114, 20200115, 20200116]) >>> TimeFrame.get_frames(start, end, FrameType.MIN30) [202001131000, 202001131030, 202001131100, 202001131130, 202001131330] Args: start: end: frame_type: Returns: frame list \"\"\" n = cls . count_frames ( start , end , frame_type ) return cls . get_frames_by_count ( end , n , frame_type ) get_frames_by_count ( end , n , frame_type ) classmethod \u00b6 \u53d6\u4ee5end\u4e3a\u7ed3\u675f\u70b9,\u5468\u671f\u4e3aframe_type\u7684n\u4e2aframe \u8c03\u7528\u524d\u8bf7\u5c06 end \u5bf9\u9f50\u5230 frame_type \u7684\u8fb9\u754c Examples: >>> end = arrow . get ( '2020-1-6 14:30' ) . naive >>> TimeFrame . day_frames = np . array ([ 20200102 , 20200103 , 20200106 , 20200107 , 20200108 , 20200109 ]) >>> TimeFrame . get_frames_by_count ( end , 2 , FrameType . MIN30 ) [ 202001061400 , 202001061430 ] Parameters: Name Type Description Default end Arrow required n int required frame_type FrameType required Returns: Type Description List[int] frame list Source code in omicron/models/timeframe.py @classmethod def get_frames_by_count ( cls , end : Arrow , n : int , frame_type : FrameType ) -> List [ int ]: \"\"\"\u53d6\u4ee5end\u4e3a\u7ed3\u675f\u70b9,\u5468\u671f\u4e3aframe_type\u7684n\u4e2aframe \u8c03\u7528\u524d\u8bf7\u5c06`end`\u5bf9\u9f50\u5230`frame_type`\u7684\u8fb9\u754c Examples: >>> end = arrow.get('2020-1-6 14:30').naive >>> TimeFrame.day_frames = np.array([20200102, 20200103,20200106, 20200107, 20200108, 20200109]) >>> TimeFrame.get_frames_by_count(end, 2, FrameType.MIN30) [202001061400, 202001061430] Args: end: n: frame_type: Returns: frame list \"\"\" if frame_type == FrameType . DAY : end = cls . date2int ( end ) pos = np . searchsorted ( cls . day_frames , end , side = \"right\" ) return cls . 
day_frames [ max ( 0 , pos - n ) : pos ] . tolist () elif frame_type == FrameType . WEEK : end = cls . date2int ( end ) pos = np . searchsorted ( cls . week_frames , end , side = \"right\" ) return cls . week_frames [ max ( 0 , pos - n ) : pos ] . tolist () elif frame_type == FrameType . MONTH : end = cls . date2int ( end ) pos = np . searchsorted ( cls . month_frames , end , side = \"right\" ) return cls . month_frames [ max ( 0 , pos - n ) : pos ] . tolist () elif frame_type in { FrameType . MIN1 , FrameType . MIN5 , FrameType . MIN15 , FrameType . MIN30 , FrameType . MIN60 , }: n_days = n // len ( cls . ticks [ frame_type ]) + 2 ticks = cls . ticks [ frame_type ] * n_days days = cls . get_frames_by_count ( end , n_days , FrameType . DAY ) days = np . repeat ( days , len ( cls . ticks [ frame_type ])) ticks = [ day . item () * 10000 + int ( tm / 60 ) * 100 + tm % 60 for day , tm in zip ( days , ticks ) ] # list index is much faster than ext.index_sorted when the arr is small pos = ticks . index ( cls . time2int ( end )) + 1 return ticks [ max ( 0 , pos - n ) : pos ] else : # pragma: no cover raise ValueError ( f \" { frame_type } not support yet\" ) get_previous_trade_day ( now ) classmethod \u00b6 \u83b7\u53d6\u4e0a\u4e00\u4e2a\u4ea4\u6613\u65e5 \u5982\u679c\u5f53\u5929\u662f\u5468\u516d\u6216\u8005\u5468\u65e5\uff0c\u8fd4\u56de\u5468\u4e94\uff08\u4ea4\u6613\u65e5\uff09\uff0c\u5982\u679c\u5f53\u5929\u662f\u5468\u4e00\uff0c\u8fd4\u56de\u5468\u4e94\uff0c\u5982\u679c\u5f53\u5929\u662f\u5468\u4e94\uff0c\u8fd4\u56de\u5468\u56db Parameters: Name Type Description Default now \u6307\u5b9a\u7684\u65e5\u671f\uff0cdate\u5bf9\u8c61 required Returns: Type Description datetime.date \u4e0a\u4e00\u4e2a\u4ea4\u6613\u65e5 Source code in omicron/models/timeframe.py @classmethod def get_previous_trade_day ( cls , now : datetime . date ): \"\"\"\u83b7\u53d6\u4e0a\u4e00\u4e2a\u4ea4\u6613\u65e5 \u5982\u679c\u5f53\u5929\u662f\u5468\u516d\u6216\u8005\u5468\u65e5\uff0c\u8fd4\u56de\u5468\u4e94\uff08\u4ea4\u6613\u65e5\uff09\uff0c\u5982\u679c\u5f53\u5929\u662f\u5468\u4e00\uff0c\u8fd4\u56de\u5468\u4e94\uff0c\u5982\u679c\u5f53\u5929\u662f\u5468\u4e94\uff0c\u8fd4\u56de\u5468\u56db Args: now : \u6307\u5b9a\u7684\u65e5\u671f\uff0cdate\u5bf9\u8c61 Returns: datetime.date: \u4e0a\u4e00\u4e2a\u4ea4\u6613\u65e5 \"\"\" if now == datetime . date ( 2005 , 1 , 4 ): return now if TimeFrame . is_trade_day ( now ): pre_trade_day = TimeFrame . day_shift ( now , - 1 ) else : pre_trade_day = TimeFrame . day_shift ( now , 0 ) return pre_trade_day get_ticks ( frame_type ) classmethod \u00b6 \u53d6\u6708\u7ebf\u3001\u5468\u7ebf\u3001\u65e5\u7ebf\u53ca\u5404\u5206\u949f\u7ebf\u5bf9\u5e94\u7684frame \u5bf9\u5206\u949f\u7ebf\uff0c\u8fd4\u56de\u503c\u4ec5\u5305\u542b\u65f6\u95f4\uff0c\u4e0d\u5305\u542b\u65e5\u671f\uff08\u5747\u4e3a\u6574\u6570\u8868\u793a\uff09 Examples: >>> TimeFrame . month_frames = np . array ([ 20050131 , 20050228 , 20050331 ]) >>> TimeFrame . get_ticks ( FrameType . MONTH )[: 3 ] array ([ 20050131 , 20050228 , 20050331 ]) Parameters: Name Type Description Default frame_type [description] required Exceptions: Type Description ValueError [description] Returns: Type Description Union[List, np.array] \u6708\u7ebf\u3001\u5468\u7ebf\u3001\u65e5\u7ebf\u53ca\u5404\u5206\u949f\u7ebf\u5bf9\u5e94\u7684frame Source code in omicron/models/timeframe.py @classmethod def get_ticks ( cls , frame_type : FrameType ) -> Union [ List , np . 
array ]: \"\"\"\u53d6\u6708\u7ebf\u3001\u5468\u7ebf\u3001\u65e5\u7ebf\u53ca\u5404\u5206\u949f\u7ebf\u5bf9\u5e94\u7684frame \u5bf9\u5206\u949f\u7ebf\uff0c\u8fd4\u56de\u503c\u4ec5\u5305\u542b\u65f6\u95f4\uff0c\u4e0d\u5305\u542b\u65e5\u671f\uff08\u5747\u4e3a\u6574\u6570\u8868\u793a\uff09 Examples: >>> TimeFrame.month_frames = np.array([20050131, 20050228, 20050331]) >>> TimeFrame.get_ticks(FrameType.MONTH)[:3] array([20050131, 20050228, 20050331]) Args: frame_type : [description] Raises: ValueError: [description] Returns: \u6708\u7ebf\u3001\u5468\u7ebf\u3001\u65e5\u7ebf\u53ca\u5404\u5206\u949f\u7ebf\u5bf9\u5e94\u7684frame \"\"\" if frame_type in cls . minute_level_frames : return cls . ticks [ frame_type ] if frame_type == FrameType . DAY : return cls . day_frames elif frame_type == FrameType . WEEK : return cls . week_frames elif frame_type == FrameType . MONTH : return cls . month_frames else : # pragma: no cover raise ValueError ( f \" { frame_type } not supported!\" ) init () async classmethod \u00b6 \u521d\u59cb\u5316\u65e5\u5386 Source code in omicron/models/timeframe.py @classmethod async def init ( cls ): \"\"\"\u521d\u59cb\u5316\u65e5\u5386\"\"\" await cls . _load_calendar () int2date ( d ) classmethod \u00b6 \u5c06\u6570\u5b57\u8868\u793a\u7684\u65e5\u671f\u8f6c\u6362\u6210\u4e3a\u65e5\u671f\u683c\u5f0f Examples: >>> TimeFrame . int2date ( 20200501 ) datetime . date ( 2020 , 5 , 1 ) Parameters: Name Type Description Default d Union[int, str] YYYYMMDD\u8868\u793a\u7684\u65e5\u671f required Returns: Type Description datetime.date \u8f6c\u6362\u540e\u7684\u65e5\u671f Source code in omicron/models/timeframe.py @classmethod def int2date ( cls , d : Union [ int , str ]) -> datetime . date : \"\"\"\u5c06\u6570\u5b57\u8868\u793a\u7684\u65e5\u671f\u8f6c\u6362\u6210\u4e3a\u65e5\u671f\u683c\u5f0f Examples: >>> TimeFrame.int2date(20200501) datetime.date(2020, 5, 1) Args: d: YYYYMMDD\u8868\u793a\u7684\u65e5\u671f Returns: \u8f6c\u6362\u540e\u7684\u65e5\u671f \"\"\" s = str ( d ) # it's 8 times faster than arrow.get return datetime . date ( int ( s [: 4 ]), int ( s [ 4 : 6 ]), int ( s [ 6 :])) int2time ( tm ) classmethod \u00b6 \u5c06\u6574\u6570\u8868\u793a\u7684\u65f6\u95f4\u8f6c\u6362\u4e3a datetime \u7c7b\u578b\u8868\u793a Examples: >>> TimeFrame . int2time ( 202005011500 ) datetime . datetime ( 2020 , 5 , 1 , 15 , 0 ) Parameters: Name Type Description Default tm int time in YYYYMMDDHHmm format required Returns: Type Description datetime.datetime \u8f6c\u6362\u540e\u7684\u65f6\u95f4 Source code in omicron/models/timeframe.py @classmethod def int2time ( cls , tm : int ) -> datetime . datetime : \"\"\"\u5c06\u6574\u6570\u8868\u793a\u7684\u65f6\u95f4\u8f6c\u6362\u4e3a`datetime`\u7c7b\u578b\u8868\u793a examples: >>> TimeFrame.int2time(202005011500) datetime.datetime(2020, 5, 1, 15, 0) Args: tm: time in YYYYMMDDHHmm format Returns: \u8f6c\u6362\u540e\u7684\u65f6\u95f4 \"\"\" s = str ( tm ) # its 8 times faster than arrow.get() return datetime . 
datetime ( int ( s [: 4 ]), int ( s [ 4 : 6 ]), int ( s [ 6 : 8 ]), int ( s [ 8 : 10 ]), int ( s [ 10 : 12 ]) ) is_bar_closed ( frame , ft ) classmethod \u00b6 \u5224\u65ad frame \u6240\u4ee3\u8868\u7684bar\u662f\u5426\u5df2\u7ecf\u6536\u76d8\uff08\u7ed3\u675f\uff09 \u5982\u679c\u662f\u65e5\u7ebf\uff0cframe\u4e0d\u4e3a\u5f53\u5929\uff0c\u5219\u8ba4\u4e3a\u5df2\u6536\u76d8\uff1b\u6216\u8005\u5f53\u524d\u65f6\u95f4\u5728\u6536\u76d8\u65f6\u95f4\u4e4b\u540e\uff0c\u4e5f\u8ba4\u4e3a\u5df2\u6536\u76d8\u3002 \u5982\u679c\u662f\u5176\u5b83\u5468\u671f\uff0c\u5219\u53ea\u6709\u5f53frame\u6b63\u597d\u5728\u8fb9\u754c\u4e0a\uff0c\u624d\u8ba4\u4e3a\u662f\u5df2\u6536\u76d8\u3002\u8fd9\u91cc\u6709\u4e00\u4e2a\u5047\u8bbe\uff1a\u6211\u4eec\u4e0d\u4f1a\u5728\u5176\u5b83\u5468\u671f\u4e0a\uff0c\u5224\u65ad\u672a\u6765\u7684\u67d0\u4e2aframe\u662f\u5426\u5df2\u7ecf\u6536\u76d8\u3002 Parameters: Name Type Description Default frame bar\u6240\u5904\u7684\u65f6\u95f4\uff0c\u5fc5\u987b\u5c0f\u4e8e\u5f53\u524d\u65f6\u95f4 required ft FrameType bar\u6240\u4ee3\u8868\u7684\u5e27\u7c7b\u578b required Returns: Type Description bool \u662f\u5426\u5df2\u7ecf\u6536\u76d8 Source code in omicron/models/timeframe.py @classmethod def is_bar_closed ( cls , frame : Frame , ft : FrameType ) -> bool : \"\"\"\u5224\u65ad`frame`\u6240\u4ee3\u8868\u7684bar\u662f\u5426\u5df2\u7ecf\u6536\u76d8\uff08\u7ed3\u675f\uff09 \u5982\u679c\u662f\u65e5\u7ebf\uff0cframe\u4e0d\u4e3a\u5f53\u5929\uff0c\u5219\u8ba4\u4e3a\u5df2\u6536\u76d8\uff1b\u6216\u8005\u5f53\u524d\u65f6\u95f4\u5728\u6536\u76d8\u65f6\u95f4\u4e4b\u540e\uff0c\u4e5f\u8ba4\u4e3a\u5df2\u6536\u76d8\u3002 \u5982\u679c\u662f\u5176\u5b83\u5468\u671f\uff0c\u5219\u53ea\u6709\u5f53frame\u6b63\u597d\u5728\u8fb9\u754c\u4e0a\uff0c\u624d\u8ba4\u4e3a\u662f\u5df2\u6536\u76d8\u3002\u8fd9\u91cc\u6709\u4e00\u4e2a\u5047\u8bbe\uff1a\u6211\u4eec\u4e0d\u4f1a\u5728\u5176\u5b83\u5468\u671f\u4e0a\uff0c\u5224\u65ad\u672a\u6765\u7684\u67d0\u4e2aframe\u662f\u5426\u5df2\u7ecf\u6536\u76d8\u3002 Args: frame : bar\u6240\u5904\u7684\u65f6\u95f4\uff0c\u5fc5\u987b\u5c0f\u4e8e\u5f53\u524d\u65f6\u95f4 ft: bar\u6240\u4ee3\u8868\u7684\u5e27\u7c7b\u578b Returns: bool: \u662f\u5426\u5df2\u7ecf\u6536\u76d8 \"\"\" floor = cls . floor ( frame , ft ) now = arrow . now () if ft == FrameType . DAY : return floor < now . date () or now . hour >= 15 else : return floor == frame is_closing_call_auction_time ( tm = None ) classmethod \u00b6 \u5224\u65ad tm \u6307\u5b9a\u7684\u65f6\u95f4\u662f\u5426\u4e3a\u6536\u76d8\u96c6\u5408\u7ade\u4ef7\u65f6\u95f4 Fixme \u6b64\u5904\u5b9e\u73b0\u6709\u8bef\uff0c\u6536\u76d8\u96c6\u5408\u7ade\u4ef7\u65f6\u95f4\u5e94\u8be5\u8fd8\u5305\u542b\u4e0a\u5348\u6536\u76d8\u65f6\u95f4 Parameters: Name Type Description Default tm [description]. Defaults to None. None Returns: Type Description bool bool Source code in omicron/models/timeframe.py @classmethod def is_closing_call_auction_time ( cls , tm : Union [ datetime . datetime , Arrow ] = None ) -> bool : \"\"\"\u5224\u65ad`tm`\u6307\u5b9a\u7684\u65f6\u95f4\u662f\u5426\u4e3a\u6536\u76d8\u96c6\u5408\u7ade\u4ef7\u65f6\u95f4 Fixme: \u6b64\u5904\u5b9e\u73b0\u6709\u8bef\uff0c\u6536\u76d8\u96c6\u5408\u7ade\u4ef7\u65f6\u95f4\u5e94\u8be5\u8fd8\u5305\u542b\u4e0a\u5348\u6536\u76d8\u65f6\u95f4 Args: tm : [description]. Defaults to None. Returns: bool \"\"\" tm = tm or cls . now () if not cls . is_trade_day ( tm ): return False minutes = tm . hour * 60 + tm . 
minute return 15 * 60 - 3 <= minutes < 15 * 60 is_open_time ( tm = None ) classmethod \u00b6 \u5224\u65ad tm \u6307\u5b9a\u7684\u65f6\u95f4\u662f\u5426\u5904\u5728\u4ea4\u6613\u65f6\u95f4\u6bb5\u3002 \u4ea4\u6613\u65f6\u95f4\u6bb5\u662f\u6307\u96c6\u5408\u7ade\u4ef7\u65f6\u95f4\u6bb5\u4e4b\u5916\u7684\u5f00\u76d8\u65f6\u95f4 Examples: >>> TimeFrame . day_frames = np . array ([ 20200102 , 20200103 , 20200106 , 20200107 , 20200108 ]) >>> TimeFrame . is_open_time ( arrow . get ( '2020-1-1 14:59' ) . naive ) False >>> TimeFrame . is_open_time ( arrow . get ( '2020-1-3 14:59' ) . naive ) True Parameters: Name Type Description Default tm [description]. Defaults to None. None Returns: Type Description bool bool Source code in omicron/models/timeframe.py @classmethod def is_open_time ( cls , tm : Union [ datetime . datetime , Arrow ] = None ) -> bool : \"\"\"\u5224\u65ad`tm`\u6307\u5b9a\u7684\u65f6\u95f4\u662f\u5426\u5904\u5728\u4ea4\u6613\u65f6\u95f4\u6bb5\u3002 \u4ea4\u6613\u65f6\u95f4\u6bb5\u662f\u6307\u96c6\u5408\u7ade\u4ef7\u65f6\u95f4\u6bb5\u4e4b\u5916\u7684\u5f00\u76d8\u65f6\u95f4 Examples: >>> TimeFrame.day_frames = np.array([20200102, 20200103, 20200106, 20200107, 20200108]) >>> TimeFrame.is_open_time(arrow.get('2020-1-1 14:59').naive) False >>> TimeFrame.is_open_time(arrow.get('2020-1-3 14:59').naive) True Args: tm : [description]. Defaults to None. Returns: bool \"\"\" tm = tm or arrow . now () if not cls . is_trade_day ( tm ): return False tick = tm . hour * 60 + tm . minute return tick in cls . ticks [ FrameType . MIN1 ] is_opening_call_auction_time ( tm = None ) classmethod \u00b6 \u5224\u65ad tm \u6307\u5b9a\u7684\u65f6\u95f4\u662f\u5426\u4e3a\u5f00\u76d8\u96c6\u5408\u7ade\u4ef7\u65f6\u95f4 Parameters: Name Type Description Default tm [description]. Defaults to None. None Returns: Type Description bool bool Source code in omicron/models/timeframe.py @classmethod def is_opening_call_auction_time ( cls , tm : Union [ Arrow , datetime . datetime ] = None ) -> bool : \"\"\"\u5224\u65ad`tm`\u6307\u5b9a\u7684\u65f6\u95f4\u662f\u5426\u4e3a\u5f00\u76d8\u96c6\u5408\u7ade\u4ef7\u65f6\u95f4 Args: tm : [description]. Defaults to None. Returns: bool \"\"\" if tm is None : tm = cls . now () if not cls . is_trade_day ( tm ): return False minutes = tm . hour * 60 + tm . minute return 9 * 60 + 15 < minutes <= 9 * 60 + 25 is_trade_day ( dt ) classmethod \u00b6 \u5224\u65ad dt \u662f\u5426\u4e3a\u4ea4\u6613\u65e5 Examples: >>> TimeFrame . is_trade_day ( arrow . get ( '2020-1-1' )) False Parameters: Name Type Description Default dt required Returns: Type Description bool bool Source code in omicron/models/timeframe.py @classmethod def is_trade_day ( cls , dt : Union [ datetime . date , datetime . datetime , Arrow ]) -> bool : \"\"\"\u5224\u65ad`dt`\u662f\u5426\u4e3a\u4ea4\u6613\u65e5 Examples: >>> TimeFrame.is_trade_day(arrow.get('2020-1-1')) False Args: dt : Returns: bool \"\"\" return cls . date2int ( dt ) in cls . day_frames last_min_frame ( day , frame_type ) classmethod \u00b6 \u83b7\u53d6 day \u65e5\u5468\u671f\u4e3a frame_type \u7684\u7ed3\u675fframe\u3002 Examples: >>> TimeFrame . last_min_frame ( arrow . get ( '2020-1-5' ) . date (), FrameType . MIN30 ) datetime . 
datetime ( 2020 , 1 , 3 , 15 , 0 ) Parameters: Name Type Description Default day Union[str, Arrow, datetime.date] required frame_type FrameType required Returns: Type Description Union[datetime.date, datetime.datetime] day \u65e5\u5468\u671f\u4e3a frame_type \u7684\u7ed3\u675fframe Source code in omicron/models/timeframe.py @classmethod def last_min_frame ( cls , day : Union [ str , Arrow , datetime . date ], frame_type : FrameType ) -> Union [ datetime . date , datetime . datetime ]: \"\"\"\u83b7\u53d6`day`\u65e5\u5468\u671f\u4e3a`frame_type`\u7684\u7ed3\u675fframe\u3002 Example: >>> TimeFrame.last_min_frame(arrow.get('2020-1-5').date(), FrameType.MIN30) datetime.datetime(2020, 1, 3, 15, 0) Args: day: frame_type: Returns: `day`\u65e5\u5468\u671f\u4e3a`frame_type`\u7684\u7ed3\u675fframe \"\"\" if isinstance ( day , str ): day = cls . date2int ( arrow . get ( day ) . date ()) elif isinstance ( day , arrow . Arrow ) or isinstance ( day , datetime . datetime ): day = cls . date2int ( day . date ()) elif isinstance ( day , datetime . date ): day = cls . date2int ( day ) else : raise TypeError ( f \" { type ( day ) } is not supported.\" ) if frame_type in cls . minute_level_frames : last_close_day = cls . day_frames [ cls . day_frames <= day ][ - 1 ] day = cls . int2date ( last_close_day ) return datetime . datetime ( day . year , day . month , day . day , hour = 15 , minute = 0 ) else : # pragma: no cover raise ValueError ( f \" { frame_type } not supported\" ) minute_frames_floor ( ticks , moment ) classmethod \u00b6 \u5bf9\u4e8e\u5206\u949f\u7ea7\u7684frame,\u8fd4\u56de\u5b83\u4eec\u4e0eframe\u523b\u5ea6\u5411\u4e0b\u5bf9\u9f50\u540e\u7684frame\u53ca\u65e5\u671f\u8fdb\u4f4d\u3002\u5982\u679c\u9700\u8981\u5bf9\u9f50\u5230\u4e0a\u4e00\u4e2a\u4ea4\u6613 \u65e5\uff0c\u5219\u8fdb\u4f4d\u4e3a-1\uff0c\u5426\u5219\u4e3a0. Examples: >>> ticks = [ 600 , 630 , 660 , 690 , 810 , 840 , 870 , 900 ] >>> TimeFrame . minute_frames_floor ( ticks , 545 ) ( 900 , - 1 ) >>> TimeFrame . minute_frames_floor ( ticks , 600 ) ( 600 , 0 ) >>> TimeFrame . minute_frames_floor ( ticks , 605 ) ( 600 , 0 ) >>> TimeFrame . minute_frames_floor ( ticks , 899 ) ( 870 , 0 ) >>> TimeFrame . minute_frames_floor ( ticks , 900 ) ( 900 , 0 ) >>> TimeFrame . minute_frames_floor ( ticks , 905 ) ( 900 , 0 ) Parameters: Name Type Description Default ticks np.array or list frames\u523b\u5ea6 required moment int \u6574\u6570\u8868\u793a\u7684\u5206\u949f\u6570\uff0c\u6bd4\u5982900\u8868\u793a15\uff1a00 required Returns: Type Description Tuple[int, int] tuple, the first is the new moment, the second is carry-on Source code in omicron/models/timeframe.py @classmethod def minute_frames_floor ( cls , ticks , moment ) -> Tuple [ int , int ]: \"\"\" \u5bf9\u4e8e\u5206\u949f\u7ea7\u7684frame,\u8fd4\u56de\u5b83\u4eec\u4e0eframe\u523b\u5ea6\u5411\u4e0b\u5bf9\u9f50\u540e\u7684frame\u53ca\u65e5\u671f\u8fdb\u4f4d\u3002\u5982\u679c\u9700\u8981\u5bf9\u9f50\u5230\u4e0a\u4e00\u4e2a\u4ea4\u6613 \u65e5\uff0c\u5219\u8fdb\u4f4d\u4e3a-1\uff0c\u5426\u5219\u4e3a0. 
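The down-alignment rule described for minute-level frames can be reproduced with a plain `numpy.searchsorted` over the tick grid. The sketch below is a standalone illustration of that logic only (it does not import omicron; the helper name `minute_floor` is invented for this example) and checks itself against the doctest values shown in the documentation.

```python
import numpy as np

def minute_floor(ticks, moment):
    """Align `moment` (minutes since midnight) down to the tick grid.

    Returns (aligned_tick, day_carry); day_carry is -1 when the moment falls
    before the first tick of the day, i.e. it belongs to the previous trade day.
    """
    ticks = np.asarray(ticks)
    if moment < ticks[0]:
        return int(ticks[-1]), -1
    # side="right" gives the count of ticks <= moment
    idx = np.searchsorted(ticks, moment, side="right")
    return int(ticks[idx - 1]), 0

ticks = [600, 630, 660, 690, 810, 840, 870, 900]   # the MIN30 grid used in the doctests
assert minute_floor(ticks, 545) == (900, -1)       # 09:05 -> previous day's 15:00
assert minute_floor(ticks, 605) == (600, 0)        # 10:05 -> 10:00
assert minute_floor(ticks, 905) == (900, 0)        # 15:05 -> 15:00
```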
Examples: >>> ticks = [600, 630, 660, 690, 810, 840, 870, 900] >>> TimeFrame.minute_frames_floor(ticks, 545) (900, -1) >>> TimeFrame.minute_frames_floor(ticks, 600) (600, 0) >>> TimeFrame.minute_frames_floor(ticks, 605) (600, 0) >>> TimeFrame.minute_frames_floor(ticks, 899) (870, 0) >>> TimeFrame.minute_frames_floor(ticks, 900) (900, 0) >>> TimeFrame.minute_frames_floor(ticks, 905) (900, 0) Args: ticks (np.array or list): frames\u523b\u5ea6 moment (int): \u6574\u6570\u8868\u793a\u7684\u5206\u949f\u6570\uff0c\u6bd4\u5982900\u8868\u793a15\uff1a00 Returns: tuple, the first is the new moment, the second is carry-on \"\"\" if moment < ticks [ 0 ]: return ticks [ - 1 ], - 1 # \u2019right' \u76f8\u5f53\u4e8e ticks <= m index = np . searchsorted ( ticks , moment , side = \"right\" ) return ticks [ index - 1 ], 0 month_shift ( start , offset ) classmethod \u00b6 \u6c42 start \u6240\u5728\u7684\u6708\u79fb\u4f4d\u540e\u7684frame \u672c\u51fd\u6570\u9996\u5148\u5c06 start \u5bf9\u9f50\uff0c\u7136\u540e\u8fdb\u884c\u79fb\u4f4d\u3002 Examples: >>> TimeFrame . month_frames = np . array ([ 20150130 , 20150227 , 20150331 , 20150430 ]) >>> TimeFrame . month_shift ( arrow . get ( '2015-2-26' ) . date (), 0 ) datetime . date ( 2015 , 1 , 30 ) >>> TimeFrame . month_shift ( arrow . get ( '2015-2-27' ) . date (), 0 ) datetime . date ( 2015 , 2 , 27 ) >>> TimeFrame . month_shift ( arrow . get ( '2015-3-1' ) . date (), 0 ) datetime . date ( 2015 , 2 , 27 ) >>> TimeFrame . month_shift ( arrow . get ( '2015-3-1' ) . date (), 1 ) datetime . date ( 2015 , 3 , 31 ) Returns: Type Description datetime.date \u79fb\u4f4d\u540e\u7684\u65e5\u671f Source code in omicron/models/timeframe.py @classmethod def month_shift ( cls , start : datetime . date , offset : int ) -> datetime . date : \"\"\"\u6c42`start`\u6240\u5728\u7684\u6708\u79fb\u4f4d\u540e\u7684frame \u672c\u51fd\u6570\u9996\u5148\u5c06`start`\u5bf9\u9f50\uff0c\u7136\u540e\u8fdb\u884c\u79fb\u4f4d\u3002 Examples: >>> TimeFrame.month_frames = np.array([20150130, 20150227, 20150331, 20150430]) >>> TimeFrame.month_shift(arrow.get('2015-2-26').date(), 0) datetime.date(2015, 1, 30) >>> TimeFrame.month_shift(arrow.get('2015-2-27').date(), 0) datetime.date(2015, 2, 27) >>> TimeFrame.month_shift(arrow.get('2015-3-1').date(), 0) datetime.date(2015, 2, 27) >>> TimeFrame.month_shift(arrow.get('2015-3-1').date(), 1) datetime.date(2015, 3, 31) Returns: \u79fb\u4f4d\u540e\u7684\u65e5\u671f \"\"\" start = cls . date2int ( start ) return cls . int2date ( ext . shift ( cls . month_frames , start , offset )) replace_date ( dtm , dt ) classmethod \u00b6 \u5c06 dtm \u53d8\u91cf\u7684\u65e5\u671f\u66f4\u6362\u4e3a dt \u6307\u5b9a\u7684\u65e5\u671f Examples: >>> TimeFrame . replace_date ( arrow . get ( '2020-1-1 13:49' ) . datetime , datetime . date ( 2019 , 1 , 1 )) datetime . datetime ( 2019 , 1 , 1 , 13 , 49 ) Parameters: Name Type Description Default dtm datetime.datetime [description] required dt datetime.date [description] required Returns: Type Description datetime.datetime \u53d8\u6362\u540e\u7684\u65f6\u95f4 Source code in omicron/models/timeframe.py @classmethod def replace_date ( cls , dtm : datetime . datetime , dt : datetime . date ) -> datetime . 
datetime : \"\"\"\u5c06`dtm`\u53d8\u91cf\u7684\u65e5\u671f\u66f4\u6362\u4e3a`dt`\u6307\u5b9a\u7684\u65e5\u671f Example: >>> TimeFrame.replace_date(arrow.get('2020-1-1 13:49').datetime, datetime.date(2019, 1,1)) datetime.datetime(2019, 1, 1, 13, 49) Args: dtm (datetime.datetime): [description] dt (datetime.date): [description] Returns: \u53d8\u6362\u540e\u7684\u65f6\u95f4 \"\"\" return datetime . datetime ( dt . year , dt . month , dt . day , dtm . hour , dtm . minute , dtm . second , dtm . microsecond ) resample_frames ( trade_days , frame_type ) classmethod \u00b6 \u5c06\u4ece\u884c\u60c5\u670d\u52a1\u5668\u83b7\u53d6\u7684\u4ea4\u6613\u65e5\u5386\u91cd\u91c7\u6837\uff0c\u751f\u6210\u5468\u5e27\u548c\u6708\u7ebf\u5e27 Parameters: Name Type Description Default trade_days Iterable [description] required frame_type FrameType [description] required Returns: Type Description List[int] \u91cd\u91c7\u6837\u540e\u7684\u65e5\u671f\u5217\u8868\uff0c\u65e5\u671f\u7528\u6574\u6570\u8868\u793a Source code in omicron/models/timeframe.py @classmethod def resample_frames ( cls , trade_days : Iterable [ datetime . date ], frame_type : FrameType ) -> List [ int ]: \"\"\"\u5c06\u4ece\u884c\u60c5\u670d\u52a1\u5668\u83b7\u53d6\u7684\u4ea4\u6613\u65e5\u5386\u91cd\u91c7\u6837\uff0c\u751f\u6210\u5468\u5e27\u548c\u6708\u7ebf\u5e27 Args: trade_days (Iterable): [description] frame_type (FrameType): [description] Returns: List[int]: \u91cd\u91c7\u6837\u540e\u7684\u65e5\u671f\u5217\u8868\uff0c\u65e5\u671f\u7528\u6574\u6570\u8868\u793a \"\"\" if frame_type == FrameType . WEEK : weeks = [] last = trade_days [ 0 ] for cur in trade_days : if cur . weekday () < last . weekday () or ( cur - last ) . days >= 7 : weeks . append ( last ) last = cur if weeks [ - 1 ] < last : weeks . append ( last ) return weeks elif frame_type == FrameType . MONTH : months = [] last = trade_days [ 0 ] for cur in trade_days : if cur . day < last . day : months . append ( last ) last = cur months . append ( last ) return months elif frame_type == FrameType . QUARTER : quarters = [] last = trade_days [ 0 ] for cur in trade_days : if last . month % 3 == 0 : if cur . month > last . month or cur . year > last . year : quarters . append ( last ) last = cur quarters . append ( last ) return quarters elif frame_type == FrameType . YEAR : years = [] last = trade_days [ 0 ] for cur in trade_days : if cur . year > last . year : years . append ( last ) last = cur years . 
append ( last ) return years else : # pragma: no cover raise ValueError ( f \"Unsupported FrameType: { frame_type } \" ) service_degrade () classmethod \u00b6 \u5f53cache\u4e2d\u4e0d\u5b58\u5728\u65e5\u5386\u65f6\uff0c\u542f\u7528\u968fomicron\u7248\u672c\u4e00\u8d77\u53d1\u884c\u65f6\u81ea\u5e26\u7684\u65e5\u5386\u3002 \u6ce8\u610f\uff1a\u968fomicron\u7248\u672c\u4e00\u8d77\u53d1\u884c\u65f6\u81ea\u5e26\u7684\u65e5\u5386\u5f88\u53ef\u80fd\u4e0d\u662f\u6700\u65b0\u7684\uff0c\u5e76\u4e14\u53ef\u80fd\u5305\u542b\u9519\u8bef\u3002\u6bd4\u5982\uff0c\u5b58\u5728\u8fd9\u6837\u7684\u60c5\u51b5\uff0c\u5728\u672c\u7248\u672c\u7684omicron\u53d1\u884c\u65f6\uff0c\u65e5\u5386\u66f4\u65b0\u5230\u4e862021\u5e7412\u670831\u65e5\uff0c\u5728\u8fd9\u4e4b\u524d\u7684\u65e5\u5386\u90fd\u662f\u51c6\u786e\u7684\uff0c\u4f46\u5728\u6b64\u4e4b\u540e\u7684\u65e5\u5386\uff0c\u5219\u6709\u53ef\u80fd\u51fa\u73b0\u9519\u8bef\u3002\u56e0\u6b64\uff0c\u53ea\u5e94\u8be5\u5728\u7279\u6b8a\u7684\u60c5\u51b5\u4e0b\uff08\u6bd4\u5982\u6d4b\u8bd5\uff09\u8c03\u7528\u6b64\u65b9\u6cd5\uff0c\u4ee5\u83b7\u5f97\u4e00\u4e2a\u964d\u7ea7\u7684\u670d\u52a1\u3002 Source code in omicron/models/timeframe.py @classmethod def service_degrade ( cls ): \"\"\"\u5f53cache\u4e2d\u4e0d\u5b58\u5728\u65e5\u5386\u65f6\uff0c\u542f\u7528\u968fomicron\u7248\u672c\u4e00\u8d77\u53d1\u884c\u65f6\u81ea\u5e26\u7684\u65e5\u5386\u3002 \u6ce8\u610f\uff1a\u968fomicron\u7248\u672c\u4e00\u8d77\u53d1\u884c\u65f6\u81ea\u5e26\u7684\u65e5\u5386\u5f88\u53ef\u80fd\u4e0d\u662f\u6700\u65b0\u7684\uff0c\u5e76\u4e14\u53ef\u80fd\u5305\u542b\u9519\u8bef\u3002\u6bd4\u5982\uff0c\u5b58\u5728\u8fd9\u6837\u7684\u60c5\u51b5\uff0c\u5728\u672c\u7248\u672c\u7684omicron\u53d1\u884c\u65f6\uff0c\u65e5\u5386\u66f4\u65b0\u5230\u4e862021\u5e7412\u670831\u65e5\uff0c\u5728\u8fd9\u4e4b\u524d\u7684\u65e5\u5386\u90fd\u662f\u51c6\u786e\u7684\uff0c\u4f46\u5728\u6b64\u4e4b\u540e\u7684\u65e5\u5386\uff0c\u5219\u6709\u53ef\u80fd\u51fa\u73b0\u9519\u8bef\u3002\u56e0\u6b64\uff0c\u53ea\u5e94\u8be5\u5728\u7279\u6b8a\u7684\u60c5\u51b5\u4e0b\uff08\u6bd4\u5982\u6d4b\u8bd5\uff09\u8c03\u7528\u6b64\u65b9\u6cd5\uff0c\u4ee5\u83b7\u5f97\u4e00\u4e2a\u964d\u7ea7\u7684\u670d\u52a1\u3002 \"\"\" _dir = os . path . dirname ( __file__ ) file = os . path . join ( _dir , \"..\" , \"config\" , \"calendar.json\" ) with open ( file , \"r\" ) as f : data = json . load ( f ) for k , v in data . items (): setattr ( cls , k , np . array ( v )) shift ( moment , n , frame_type ) classmethod \u00b6 \u5c06\u6307\u5b9a\u7684moment\u79fb\u52a8N\u4e2a frame_type \u4f4d\u7f6e\u3002 \u5f53N\u4e3a\u8d1f\u6570\u65f6\uff0c\u610f\u5473\u7740\u5411\u524d\u79fb\u52a8\uff1b\u5f53N\u4e3a\u6b63\u6570\u65f6\uff0c\u610f\u5473\u7740\u5411\u540e\u79fb\u52a8\u3002\u5982\u679cn\u4e3a\u96f6\uff0c\u610f\u5473\u7740\u79fb\u52a8\u5230\u6700\u63a5\u8fd1 \u7684\u4e00\u4e2a\u5df2\u7ed3\u675f\u7684frame\u3002 \u5982\u679cmoment\u6ca1\u6709\u5bf9\u9f50\u5230frame_type\u5bf9\u5e94\u7684\u65f6\u95f4\uff0c\u5c06\u9996\u5148\u8fdb\u884c\u5bf9\u9f50\u3002 See also: day_shift week_shift month_shift Examples: >>> TimeFrame . shift ( datetime . date ( 2020 , 1 , 3 ), 1 , FrameType . DAY ) datetime . date ( 2020 , 1 , 6 ) >>> TimeFrame . shift ( datetime . datetime ( 2020 , 1 , 6 , 11 ), 1 , FrameType . MIN30 ) datetime . 
datetime ( 2020 , 1 , 6 , 11 , 30 ) Parameters: Name Type Description Default moment Union[Arrow, datetime.date, datetime.datetime] required n int required frame_type FrameType required Returns: Type Description Union[datetime.date, datetime.datetime] \u79fb\u4f4d\u540e\u7684Frame Source code in omicron/models/timeframe.py @classmethod def shift ( cls , moment : Union [ Arrow , datetime . date , datetime . datetime ], n : int , frame_type : FrameType , ) -> Union [ datetime . date , datetime . datetime ]: \"\"\"\u5c06\u6307\u5b9a\u7684moment\u79fb\u52a8N\u4e2a`frame_type`\u4f4d\u7f6e\u3002 \u5f53N\u4e3a\u8d1f\u6570\u65f6\uff0c\u610f\u5473\u7740\u5411\u524d\u79fb\u52a8\uff1b\u5f53N\u4e3a\u6b63\u6570\u65f6\uff0c\u610f\u5473\u7740\u5411\u540e\u79fb\u52a8\u3002\u5982\u679cn\u4e3a\u96f6\uff0c\u610f\u5473\u7740\u79fb\u52a8\u5230\u6700\u63a5\u8fd1 \u7684\u4e00\u4e2a\u5df2\u7ed3\u675f\u7684frame\u3002 \u5982\u679cmoment\u6ca1\u6709\u5bf9\u9f50\u5230frame_type\u5bf9\u5e94\u7684\u65f6\u95f4\uff0c\u5c06\u9996\u5148\u8fdb\u884c\u5bf9\u9f50\u3002 See also: - [day_shift][omicron.models.timeframe.TimeFrame.day_shift] - [week_shift][omicron.models.timeframe.TimeFrame.week_shift] - [month_shift][omicron.models.timeframe.TimeFrame.month_shift] Examples: >>> TimeFrame.shift(datetime.date(2020, 1, 3), 1, FrameType.DAY) datetime.date(2020, 1, 6) >>> TimeFrame.shift(datetime.datetime(2020, 1, 6, 11), 1, FrameType.MIN30) datetime.datetime(2020, 1, 6, 11, 30) Args: moment: n: frame_type: Returns: \u79fb\u4f4d\u540e\u7684Frame \"\"\" if frame_type == FrameType . DAY : return cls . day_shift ( moment , n ) elif frame_type == FrameType . WEEK : return cls . week_shift ( moment , n ) elif frame_type == FrameType . MONTH : return cls . month_shift ( moment , n ) elif frame_type in [ FrameType . MIN1 , FrameType . MIN5 , FrameType . MIN15 , FrameType . MIN30 , FrameType . MIN60 , ]: tm = moment . hour * 60 + moment . minute new_tick_pos = cls . ticks [ frame_type ] . index ( tm ) + n days = new_tick_pos // len ( cls . ticks [ frame_type ]) min_part = new_tick_pos % len ( cls . ticks [ frame_type ]) date_part = cls . day_shift ( moment . date (), days ) minutes = cls . ticks [ frame_type ][ min_part ] h , m = minutes // 60 , minutes % 60 return datetime . datetime ( date_part . year , date_part . month , date_part . day , h , m , tzinfo = moment . tzinfo , ) else : # pragma: no cover raise ValueError ( f \" { frame_type } is not supported.\" ) time2int ( tm ) classmethod \u00b6 \u5c06\u65f6\u95f4\u7c7b\u578b\u8f6c\u6362\u4e3a\u6574\u6570\u7c7b\u578b tm\u53ef\u4ee5\u662fArrow\u7c7b\u578b\uff0c\u4e5f\u53ef\u4ee5\u662fdatetime.datetime\u6216\u8005\u4efb\u4f55\u5176\u5b83\u7c7b\u578b\uff0c\u53ea\u8981\u5b83\u6709year,month...\u7b49 \u5c5e\u6027 Examples: >>> TimeFrame . time2int ( datetime . datetime ( 2020 , 5 , 1 , 15 )) 202005011500 Parameters: Name Type Description Default tm Union[datetime.datetime, Arrow] required Returns: Type Description int \u8f6c\u6362\u540e\u7684\u6574\u6570\uff0c\u6bd4\u59822020050115 Source code in omicron/models/timeframe.py @classmethod def time2int ( cls , tm : Union [ datetime . 
datetime , Arrow ]) -> int : \"\"\"\u5c06\u65f6\u95f4\u7c7b\u578b\u8f6c\u6362\u4e3a\u6574\u6570\u7c7b\u578b tm\u53ef\u4ee5\u662fArrow\u7c7b\u578b\uff0c\u4e5f\u53ef\u4ee5\u662fdatetime.datetime\u6216\u8005\u4efb\u4f55\u5176\u5b83\u7c7b\u578b\uff0c\u53ea\u8981\u5b83\u6709year,month...\u7b49 \u5c5e\u6027 Examples: >>> TimeFrame.time2int(datetime.datetime(2020, 5, 1, 15)) 202005011500 Args: tm: Returns: \u8f6c\u6362\u540e\u7684\u6574\u6570\uff0c\u6bd4\u59822020050115 \"\"\" return int ( f \" { tm . year : 04 }{ tm . month : 02 }{ tm . day : 02 }{ tm . hour : 02 }{ tm . minute : 02 } \" ) week_shift ( start , offset ) classmethod \u00b6 \u5bf9\u6307\u5b9a\u65e5\u671f\u6309\u5468\u7ebf\u5e27\u8fdb\u884c\u524d\u540e\u79fb\u4f4d\u64cd\u4f5c \u53c2\u8003 omicron.models.timeframe.TimeFrame.day_shift Examples: >>> TimeFrame . week_frames = np . array ([ 20200103 , 20200110 , 20200117 , 20200123 , 20200207 , 20200214 ]) >>> moment = arrow . get ( '2020-1-21' ) . date () >>> TimeFrame . week_shift ( moment , 1 ) datetime . date ( 2020 , 1 , 23 ) >>> TimeFrame . week_shift ( moment , 0 ) datetime . date ( 2020 , 1 , 17 ) >>> TimeFrame . week_shift ( moment , - 1 ) datetime . date ( 2020 , 1 , 10 ) Returns: Type Description datetime.date \u79fb\u4f4d\u540e\u7684\u65e5\u671f Source code in omicron/models/timeframe.py @classmethod def week_shift ( cls , start : datetime . date , offset : int ) -> datetime . date : \"\"\"\u5bf9\u6307\u5b9a\u65e5\u671f\u6309\u5468\u7ebf\u5e27\u8fdb\u884c\u524d\u540e\u79fb\u4f4d\u64cd\u4f5c \u53c2\u8003 [omicron.models.timeframe.TimeFrame.day_shift][] Examples: >>> TimeFrame.week_frames = np.array([20200103, 20200110, 20200117, 20200123,20200207, 20200214]) >>> moment = arrow.get('2020-1-21').date() >>> TimeFrame.week_shift(moment, 1) datetime.date(2020, 1, 23) >>> TimeFrame.week_shift(moment, 0) datetime.date(2020, 1, 17) >>> TimeFrame.week_shift(moment, -1) datetime.date(2020, 1, 10) Returns: \u79fb\u4f4d\u540e\u7684\u65e5\u671f \"\"\" start = cls . date2int ( start ) return cls . int2date ( ext . shift ( cls . week_frames , start , offset )) rendering: heading_level: 1","title":"timeframe"},{"location":"api/timeframe/#omicron.models.timeframe.TimeFrame","text":"Source code in omicron/models/timeframe.py class TimeFrame : minute_level_frames = [ FrameType . MIN1 , FrameType . MIN5 , FrameType . MIN15 , FrameType . MIN30 , FrameType . MIN60 , ] day_level_frames = [ FrameType . DAY , FrameType . WEEK , FrameType . MONTH , FrameType . QUARTER , FrameType . YEAR , ] ticks = { FrameType . MIN1 : [ i for i in itertools . chain ( range ( 571 , 691 ), range ( 781 , 901 ))], FrameType . MIN5 : [ i for i in itertools . chain ( range ( 575 , 695 , 5 ), range ( 785 , 905 , 5 )) ], FrameType . MIN15 : [ i for i in itertools . chain ( range ( 585 , 705 , 15 ), range ( 795 , 915 , 15 )) ], FrameType . MIN30 : [ int ( s [: 2 ]) * 60 + int ( s [ 2 :]) for s in [ \"1000\" , \"1030\" , \"1100\" , \"1130\" , \"1330\" , \"1400\" , \"1430\" , \"1500\" ] ], FrameType . 
MIN60 : [ int ( s [: 2 ]) * 60 + int ( s [ 2 :]) for s in [ \"1030\" , \"1130\" , \"1400\" , \"1500\" ] ], } day_frames = None week_frames = None month_frames = None quarter_frames = None year_frames = None @classmethod def service_degrade ( cls ): \"\"\"\u5f53cache\u4e2d\u4e0d\u5b58\u5728\u65e5\u5386\u65f6\uff0c\u542f\u7528\u968fomicron\u7248\u672c\u4e00\u8d77\u53d1\u884c\u65f6\u81ea\u5e26\u7684\u65e5\u5386\u3002 \u6ce8\u610f\uff1a\u968fomicron\u7248\u672c\u4e00\u8d77\u53d1\u884c\u65f6\u81ea\u5e26\u7684\u65e5\u5386\u5f88\u53ef\u80fd\u4e0d\u662f\u6700\u65b0\u7684\uff0c\u5e76\u4e14\u53ef\u80fd\u5305\u542b\u9519\u8bef\u3002\u6bd4\u5982\uff0c\u5b58\u5728\u8fd9\u6837\u7684\u60c5\u51b5\uff0c\u5728\u672c\u7248\u672c\u7684omicron\u53d1\u884c\u65f6\uff0c\u65e5\u5386\u66f4\u65b0\u5230\u4e862021\u5e7412\u670831\u65e5\uff0c\u5728\u8fd9\u4e4b\u524d\u7684\u65e5\u5386\u90fd\u662f\u51c6\u786e\u7684\uff0c\u4f46\u5728\u6b64\u4e4b\u540e\u7684\u65e5\u5386\uff0c\u5219\u6709\u53ef\u80fd\u51fa\u73b0\u9519\u8bef\u3002\u56e0\u6b64\uff0c\u53ea\u5e94\u8be5\u5728\u7279\u6b8a\u7684\u60c5\u51b5\u4e0b\uff08\u6bd4\u5982\u6d4b\u8bd5\uff09\u8c03\u7528\u6b64\u65b9\u6cd5\uff0c\u4ee5\u83b7\u5f97\u4e00\u4e2a\u964d\u7ea7\u7684\u670d\u52a1\u3002 \"\"\" _dir = os . path . dirname ( __file__ ) file = os . path . join ( _dir , \"..\" , \"config\" , \"calendar.json\" ) with open ( file , \"r\" ) as f : data = json . load ( f ) for k , v in data . items (): setattr ( cls , k , np . array ( v )) @classmethod async def _load_calendar ( cls ): \"\"\"\u4ece\u6570\u636e\u7f13\u5b58\u4e2d\u52a0\u8f7d\u66f4\u65b0\u65e5\u5386\"\"\" from omicron import cache names = [ \"day_frames\" , \"week_frames\" , \"month_frames\" , \"quarter_frames\" , \"year_frames\" , ] for name , frame_type in zip ( names , cls . day_level_frames ): key = f \"calendar: { frame_type . value } \" result = await cache . security . lrange ( key , 0 , - 1 ) if result is not None and len ( result ): frames = [ int ( x ) for x in result ] setattr ( cls , name , np . array ( frames )) else : # pragma: no cover raise DataNotReadyError ( f \"calendar data is not ready: { name } missed\" ) @classmethod async def init ( cls ): \"\"\"\u521d\u59cb\u5316\u65e5\u5386\"\"\" await cls . _load_calendar () @classmethod def int2time ( cls , tm : int ) -> datetime . datetime : \"\"\"\u5c06\u6574\u6570\u8868\u793a\u7684\u65f6\u95f4\u8f6c\u6362\u4e3a`datetime`\u7c7b\u578b\u8868\u793a examples: >>> TimeFrame.int2time(202005011500) datetime.datetime(2020, 5, 1, 15, 0) Args: tm: time in YYYYMMDDHHmm format Returns: \u8f6c\u6362\u540e\u7684\u65f6\u95f4 \"\"\" s = str ( tm ) # its 8 times faster than arrow.get() return datetime . datetime ( int ( s [: 4 ]), int ( s [ 4 : 6 ]), int ( s [ 6 : 8 ]), int ( s [ 8 : 10 ]), int ( s [ 10 : 12 ]) ) @classmethod def time2int ( cls , tm : Union [ datetime . datetime , Arrow ]) -> int : \"\"\"\u5c06\u65f6\u95f4\u7c7b\u578b\u8f6c\u6362\u4e3a\u6574\u6570\u7c7b\u578b tm\u53ef\u4ee5\u662fArrow\u7c7b\u578b\uff0c\u4e5f\u53ef\u4ee5\u662fdatetime.datetime\u6216\u8005\u4efb\u4f55\u5176\u5b83\u7c7b\u578b\uff0c\u53ea\u8981\u5b83\u6709year,month...\u7b49 \u5c5e\u6027 Examples: >>> TimeFrame.time2int(datetime.datetime(2020, 5, 1, 15)) 202005011500 Args: tm: Returns: \u8f6c\u6362\u540e\u7684\u6574\u6570\uff0c\u6bd4\u59822020050115 \"\"\" return int ( f \" { tm . year : 04 }{ tm . month : 02 }{ tm . day : 02 }{ tm . hour : 02 }{ tm . minute : 02 } \" ) @classmethod def date2int ( cls , d : Union [ datetime . datetime , datetime . 
date , Arrow ]) -> int : \"\"\"\u5c06\u65e5\u671f\u8f6c\u6362\u4e3a\u6574\u6570\u8868\u793a \u5728zillionare\u4e2d\uff0c\u5982\u679c\u8981\u5bf9\u65f6\u95f4\u548c\u65e5\u671f\u8fdb\u884c\u6301\u4e45\u5316\u64cd\u4f5c\uff0c\u6211\u4eec\u4e00\u822c\u5c06\u5176\u8f6c\u6362\u4e3aint\u7c7b\u578b Examples: >>> TimeFrame.date2int(datetime.date(2020,5,1)) 20200501 Args: d: date Returns: \u65e5\u671f\u7684\u6574\u6570\u8868\u793a\uff0c\u6bd4\u598220220211 \"\"\" return int ( f \" { d . year : 04 }{ d . month : 02 }{ d . day : 02 } \" ) @classmethod def int2date ( cls , d : Union [ int , str ]) -> datetime . date : \"\"\"\u5c06\u6570\u5b57\u8868\u793a\u7684\u65e5\u671f\u8f6c\u6362\u6210\u4e3a\u65e5\u671f\u683c\u5f0f Examples: >>> TimeFrame.int2date(20200501) datetime.date(2020, 5, 1) Args: d: YYYYMMDD\u8868\u793a\u7684\u65e5\u671f Returns: \u8f6c\u6362\u540e\u7684\u65e5\u671f \"\"\" s = str ( d ) # it's 8 times faster than arrow.get return datetime . date ( int ( s [: 4 ]), int ( s [ 4 : 6 ]), int ( s [ 6 :])) @classmethod def day_shift ( cls , start : datetime . date , offset : int ) -> datetime . date : \"\"\"\u5bf9\u6307\u5b9a\u65e5\u671f\u8fdb\u884c\u524d\u540e\u79fb\u4f4d\u64cd\u4f5c \u5982\u679c n == 0\uff0c\u5219\u8fd4\u56ded\u5bf9\u5e94\u7684\u4ea4\u6613\u65e5\uff08\u5982\u679c\u662f\u975e\u4ea4\u6613\u65e5\uff0c\u5219\u8fd4\u56de\u521a\u7ed3\u675f\u7684\u4e00\u4e2a\u4ea4\u6613\u65e5\uff09 \u5982\u679c n > 0\uff0c\u5219\u8fd4\u56ded\u5bf9\u5e94\u7684\u4ea4\u6613\u65e5\u540e\u7b2c n \u4e2a\u4ea4\u6613\u65e5 \u5982\u679c n < 0\uff0c\u5219\u8fd4\u56ded\u5bf9\u5e94\u7684\u4ea4\u6613\u65e5\u524d\u7b2c n \u4e2a\u4ea4\u6613\u65e5 Examples: >>> TimeFrame.day_frames = [20191212, 20191213, 20191216, 20191217,20191218, 20191219] >>> TimeFrame.day_shift(datetime.date(2019,12,13), 0) datetime.date(2019, 12, 13) >>> TimeFrame.day_shift(datetime.date(2019, 12, 15), 0) datetime.date(2019, 12, 13) >>> TimeFrame.day_shift(datetime.date(2019, 12, 15), 1) datetime.date(2019, 12, 16) >>> TimeFrame.day_shift(datetime.date(2019, 12, 13), 1) datetime.date(2019, 12, 16) Args: start: the origin day offset: days to shift, can be negative Returns: \u79fb\u4f4d\u540e\u7684\u65e5\u671f \"\"\" # accelerated from 0.12 to 0.07, per 10000 loop, type conversion time included start = cls . date2int ( start ) return cls . int2date ( ext . shift ( cls . day_frames , start , offset )) @classmethod def week_shift ( cls , start : datetime . date , offset : int ) -> datetime . date : \"\"\"\u5bf9\u6307\u5b9a\u65e5\u671f\u6309\u5468\u7ebf\u5e27\u8fdb\u884c\u524d\u540e\u79fb\u4f4d\u64cd\u4f5c \u53c2\u8003 [omicron.models.timeframe.TimeFrame.day_shift][] Examples: >>> TimeFrame.week_frames = np.array([20200103, 20200110, 20200117, 20200123,20200207, 20200214]) >>> moment = arrow.get('2020-1-21').date() >>> TimeFrame.week_shift(moment, 1) datetime.date(2020, 1, 23) >>> TimeFrame.week_shift(moment, 0) datetime.date(2020, 1, 17) >>> TimeFrame.week_shift(moment, -1) datetime.date(2020, 1, 10) Returns: \u79fb\u4f4d\u540e\u7684\u65e5\u671f \"\"\" start = cls . date2int ( start ) return cls . int2date ( ext . shift ( cls . week_frames , start , offset )) @classmethod def month_shift ( cls , start : datetime . date , offset : int ) -> datetime . 
date : \"\"\"\u6c42`start`\u6240\u5728\u7684\u6708\u79fb\u4f4d\u540e\u7684frame \u672c\u51fd\u6570\u9996\u5148\u5c06`start`\u5bf9\u9f50\uff0c\u7136\u540e\u8fdb\u884c\u79fb\u4f4d\u3002 Examples: >>> TimeFrame.month_frames = np.array([20150130, 20150227, 20150331, 20150430]) >>> TimeFrame.month_shift(arrow.get('2015-2-26').date(), 0) datetime.date(2015, 1, 30) >>> TimeFrame.month_shift(arrow.get('2015-2-27').date(), 0) datetime.date(2015, 2, 27) >>> TimeFrame.month_shift(arrow.get('2015-3-1').date(), 0) datetime.date(2015, 2, 27) >>> TimeFrame.month_shift(arrow.get('2015-3-1').date(), 1) datetime.date(2015, 3, 31) Returns: \u79fb\u4f4d\u540e\u7684\u65e5\u671f \"\"\" start = cls . date2int ( start ) return cls . int2date ( ext . shift ( cls . month_frames , start , offset )) @classmethod def get_ticks ( cls , frame_type : FrameType ) -> Union [ List , np . array ]: \"\"\"\u53d6\u6708\u7ebf\u3001\u5468\u7ebf\u3001\u65e5\u7ebf\u53ca\u5404\u5206\u949f\u7ebf\u5bf9\u5e94\u7684frame \u5bf9\u5206\u949f\u7ebf\uff0c\u8fd4\u56de\u503c\u4ec5\u5305\u542b\u65f6\u95f4\uff0c\u4e0d\u5305\u542b\u65e5\u671f\uff08\u5747\u4e3a\u6574\u6570\u8868\u793a\uff09 Examples: >>> TimeFrame.month_frames = np.array([20050131, 20050228, 20050331]) >>> TimeFrame.get_ticks(FrameType.MONTH)[:3] array([20050131, 20050228, 20050331]) Args: frame_type : [description] Raises: ValueError: [description] Returns: \u6708\u7ebf\u3001\u5468\u7ebf\u3001\u65e5\u7ebf\u53ca\u5404\u5206\u949f\u7ebf\u5bf9\u5e94\u7684frame \"\"\" if frame_type in cls . minute_level_frames : return cls . ticks [ frame_type ] if frame_type == FrameType . DAY : return cls . day_frames elif frame_type == FrameType . WEEK : return cls . week_frames elif frame_type == FrameType . MONTH : return cls . month_frames else : # pragma: no cover raise ValueError ( f \" { frame_type } not supported!\" ) @classmethod def shift ( cls , moment : Union [ Arrow , datetime . date , datetime . datetime ], n : int , frame_type : FrameType , ) -> Union [ datetime . date , datetime . datetime ]: \"\"\"\u5c06\u6307\u5b9a\u7684moment\u79fb\u52a8N\u4e2a`frame_type`\u4f4d\u7f6e\u3002 \u5f53N\u4e3a\u8d1f\u6570\u65f6\uff0c\u610f\u5473\u7740\u5411\u524d\u79fb\u52a8\uff1b\u5f53N\u4e3a\u6b63\u6570\u65f6\uff0c\u610f\u5473\u7740\u5411\u540e\u79fb\u52a8\u3002\u5982\u679cn\u4e3a\u96f6\uff0c\u610f\u5473\u7740\u79fb\u52a8\u5230\u6700\u63a5\u8fd1 \u7684\u4e00\u4e2a\u5df2\u7ed3\u675f\u7684frame\u3002 \u5982\u679cmoment\u6ca1\u6709\u5bf9\u9f50\u5230frame_type\u5bf9\u5e94\u7684\u65f6\u95f4\uff0c\u5c06\u9996\u5148\u8fdb\u884c\u5bf9\u9f50\u3002 See also: - [day_shift][omicron.models.timeframe.TimeFrame.day_shift] - [week_shift][omicron.models.timeframe.TimeFrame.week_shift] - [month_shift][omicron.models.timeframe.TimeFrame.month_shift] Examples: >>> TimeFrame.shift(datetime.date(2020, 1, 3), 1, FrameType.DAY) datetime.date(2020, 1, 6) >>> TimeFrame.shift(datetime.datetime(2020, 1, 6, 11), 1, FrameType.MIN30) datetime.datetime(2020, 1, 6, 11, 30) Args: moment: n: frame_type: Returns: \u79fb\u4f4d\u540e\u7684Frame \"\"\" if frame_type == FrameType . DAY : return cls . day_shift ( moment , n ) elif frame_type == FrameType . WEEK : return cls . week_shift ( moment , n ) elif frame_type == FrameType . MONTH : return cls . month_shift ( moment , n ) elif frame_type in [ FrameType . MIN1 , FrameType . MIN5 , FrameType . MIN15 , FrameType . MIN30 , FrameType . MIN60 , ]: tm = moment . hour * 60 + moment . minute new_tick_pos = cls . ticks [ frame_type ] . index ( tm ) + n days = new_tick_pos // len ( cls . 
ticks [ frame_type ]) min_part = new_tick_pos % len ( cls . ticks [ frame_type ]) date_part = cls . day_shift ( moment . date (), days ) minutes = cls . ticks [ frame_type ][ min_part ] h , m = minutes // 60 , minutes % 60 return datetime . datetime ( date_part . year , date_part . month , date_part . day , h , m , tzinfo = moment . tzinfo , ) else : # pragma: no cover raise ValueError ( f \" { frame_type } is not supported.\" ) @classmethod def count_day_frames ( cls , start : Union [ datetime . date , Arrow ], end : Union [ datetime . date , Arrow ] ) -> int : \"\"\"calc trade days between start and end in close-to-close way. if start == end, this will returns 1. Both start/end will be aligned to open trade day before calculation. Examples: >>> start = datetime.date(2019, 12, 21) >>> end = datetime.date(2019, 12, 21) >>> TimeFrame.day_frames = [20191219, 20191220, 20191223, 20191224, 20191225] >>> TimeFrame.count_day_frames(start, end) 1 >>> # non-trade days are removed >>> TimeFrame.day_frames = [20200121, 20200122, 20200123, 20200203, 20200204, 20200205] >>> start = datetime.date(2020, 1, 23) >>> end = datetime.date(2020, 2, 4) >>> TimeFrame.count_day_frames(start, end) 3 args: start: end: returns: count of days \"\"\" start = cls . date2int ( start ) end = cls . date2int ( end ) return int ( ext . count_between ( cls . day_frames , start , end )) @classmethod def count_week_frames ( cls , start : datetime . date , end : datetime . date ) -> int : \"\"\" calc trade weeks between start and end in close-to-close way. Both start and end will be aligned to open trade day before calculation. After that, if start == end, this will returns 1 for examples, please refer to [count_day_frames][omicron.models.timeframe.TimeFrame.count_day_frames] args: start: end: returns: count of weeks \"\"\" start = cls . date2int ( start ) end = cls . date2int ( end ) return int ( ext . count_between ( cls . week_frames , start , end )) @classmethod def count_month_frames ( cls , start : datetime . date , end : datetime . date ) -> int : \"\"\"calc trade months between start and end date in close-to-close way Both start and end will be aligned to open trade day before calculation. After that, if start == end, this will returns 1. For examples, please refer to [count_day_frames][omicron.models.timeframe.TimeFrame.count_day_frames] Args: start: end: Returns: months between start and end \"\"\" start = cls . date2int ( start ) end = cls . date2int ( end ) return int ( ext . count_between ( cls . month_frames , start , end )) @classmethod def count_quarter_frames ( cls , start : datetime . date , end : datetime . date ) -> int : \"\"\"calc trade quarters between start and end date in close-to-close way Both start and end will be aligned to open trade day before calculation. After that, if start == end, this will returns 1. For examples, please refer to [count_day_frames][omicron.models.timeframe.TimeFrame.count_day_frames] Args: start (datetime.date): [description] end (datetime.date): [description] Returns: quarters between start and end \"\"\" start = cls . date2int ( start ) end = cls . date2int ( end ) return int ( ext . count_between ( cls . quarter_frames , start , end )) @classmethod def count_year_frames ( cls , start : datetime . date , end : datetime . date ) -> int : \"\"\"calc trade years between start and end date in close-to-close way Both start and end will be aligned to open trade day before calculation. After that, if start == end, this will returns 1. 
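The `count_*_frames` family documented above counts close-to-close over a sorted integer calendar, so that aligned `start == end` yields 1. The `ext.count_between` helper is not shown in this page, so the sketch below reimplements that counting rule with `numpy.searchsorted`; the names `date2int_sketch` and `count_between_sketch` are illustrative only, and the calendar values come from the doctest.

```python
import datetime
import numpy as np

def date2int_sketch(d: datetime.date) -> int:
    return int(f"{d.year:04}{d.month:02}{d.day:02}")

def count_between_sketch(frames: np.ndarray, start: int, end: int) -> int:
    """Close-to-close count: both ends are aligned down to a frame first,
    and start == end counts as 1 (the frame itself)."""
    lo = np.searchsorted(frames, start, side="right") - 1
    hi = np.searchsorted(frames, end, side="right") - 1
    return int(hi - lo + 1)

day_frames = np.array([20200121, 20200122, 20200123, 20200203, 20200204, 20200205])
start, end = datetime.date(2020, 1, 23), datetime.date(2020, 2, 4)
print(count_between_sketch(day_frames, date2int_sketch(start), date2int_sketch(end)))  # 3
```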
For examples, please refer to [count_day_frames][omicron.models.timeframe.TimeFrame.count_day_frames] Args: start (datetime.date): [description] end (datetime.date): [description] Returns: years between start and end \"\"\" start = cls . date2int ( start ) end = cls . date2int ( end ) return int ( ext . count_between ( cls . year_frames , start , end )) @classmethod def count_frames ( cls , start : Union [ datetime . date , datetime . datetime , Arrow ], end : Union [ datetime . date , datetime . datetime , Arrow ], frame_type , ) -> int : \"\"\"\u8ba1\u7b97start\u4e0eend\u4e4b\u95f4\u6709\u591a\u5c11\u4e2a\u5468\u671f\u4e3aframe_type\u7684frames See also: - [count_day_frames][omicron.models.timeframe.TimeFrame.count_day_frames] - [count_week_frames][omicron.models.timeframe.TimeFrame.count_week_frames] - [count_month_frames][omicron.models.timeframe.TimeFrame.count_month_frames] Args: start : start frame end : end frame frame_type : the type of frame Raises: ValueError: \u5982\u679cframe_type\u4e0d\u652f\u6301\uff0c\u5219\u4f1a\u629b\u51fa\u6b64\u5f02\u5e38\u3002 Returns: \u4ecestart\u5230end\u7684\u5e27\u6570 \"\"\" if frame_type == FrameType . DAY : return cls . count_day_frames ( start , end ) elif frame_type == FrameType . WEEK : return cls . count_week_frames ( start , end ) elif frame_type == FrameType . MONTH : return cls . count_month_frames ( start , end ) elif frame_type == FrameType . QUARTER : return cls . count_quarter_frames ( start , end ) elif frame_type == FrameType . YEAR : return cls . count_year_frames ( start , end ) elif frame_type in [ FrameType . MIN1 , FrameType . MIN5 , FrameType . MIN15 , FrameType . MIN30 , FrameType . MIN60 , ]: tm_start = start . hour * 60 + start . minute tm_end = end . hour * 60 + end . minute days = cls . count_day_frames ( start . date (), end . date ()) - 1 tm_start_pos = cls . ticks [ frame_type ] . index ( tm_start ) tm_end_pos = cls . ticks [ frame_type ] . index ( tm_end ) min_bars = tm_end_pos - tm_start_pos + 1 return days * len ( cls . ticks [ frame_type ]) + min_bars else : # pragma: no cover raise ValueError ( f \" { frame_type } is not supported yet\" ) @classmethod def is_trade_day ( cls , dt : Union [ datetime . date , datetime . datetime , Arrow ]) -> bool : \"\"\"\u5224\u65ad`dt`\u662f\u5426\u4e3a\u4ea4\u6613\u65e5 Examples: >>> TimeFrame.is_trade_day(arrow.get('2020-1-1')) False Args: dt : Returns: bool \"\"\" return cls . date2int ( dt ) in cls . day_frames @classmethod def is_open_time ( cls , tm : Union [ datetime . datetime , Arrow ] = None ) -> bool : \"\"\"\u5224\u65ad`tm`\u6307\u5b9a\u7684\u65f6\u95f4\u662f\u5426\u5904\u5728\u4ea4\u6613\u65f6\u95f4\u6bb5\u3002 \u4ea4\u6613\u65f6\u95f4\u6bb5\u662f\u6307\u96c6\u5408\u7ade\u4ef7\u65f6\u95f4\u6bb5\u4e4b\u5916\u7684\u5f00\u76d8\u65f6\u95f4 Examples: >>> TimeFrame.day_frames = np.array([20200102, 20200103, 20200106, 20200107, 20200108]) >>> TimeFrame.is_open_time(arrow.get('2020-1-1 14:59').naive) False >>> TimeFrame.is_open_time(arrow.get('2020-1-3 14:59').naive) True Args: tm : [description]. Defaults to None. Returns: bool \"\"\" tm = tm or arrow . now () if not cls . is_trade_day ( tm ): return False tick = tm . hour * 60 + tm . minute return tick in cls . ticks [ FrameType . MIN1 ] @classmethod def is_opening_call_auction_time ( cls , tm : Union [ Arrow , datetime . datetime ] = None ) -> bool : \"\"\"\u5224\u65ad`tm`\u6307\u5b9a\u7684\u65f6\u95f4\u662f\u5426\u4e3a\u5f00\u76d8\u96c6\u5408\u7ade\u4ef7\u65f6\u95f4 Args: tm : [description]. Defaults to None. 
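The call-auction and open-time checks documented here all reduce a timestamp to minutes since midnight and test it against fixed windows. The sketch below mirrors only those window comparisons (opening auction in (9:15, 9:25], closing auction in [14:57, 15:00), the latter carrying the Fixme noted in the docstring); it deliberately omits the `is_trade_day` guard, which needs a loaded calendar, and its function names are invented for illustration.

```python
import datetime

def minutes_of_day(tm: datetime.datetime) -> int:
    return tm.hour * 60 + tm.minute

def in_opening_call_auction(tm: datetime.datetime) -> bool:
    # (9:15, 9:25] -- mirrors the documented check 9*60+15 < minutes <= 9*60+25
    return 9 * 60 + 15 < minutes_of_day(tm) <= 9 * 60 + 25

def in_closing_call_auction(tm: datetime.datetime) -> bool:
    # [14:57, 15:00) -- per the docstring's Fixme, the morning close is not covered
    return 15 * 60 - 3 <= minutes_of_day(tm) < 15 * 60

print(in_opening_call_auction(datetime.datetime(2020, 1, 3, 9, 20)))   # True
print(in_closing_call_auction(datetime.datetime(2020, 1, 3, 14, 58)))  # True
```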
Returns: bool \"\"\" if tm is None : tm = cls . now () if not cls . is_trade_day ( tm ): return False minutes = tm . hour * 60 + tm . minute return 9 * 60 + 15 < minutes <= 9 * 60 + 25 @classmethod def is_closing_call_auction_time ( cls , tm : Union [ datetime . datetime , Arrow ] = None ) -> bool : \"\"\"\u5224\u65ad`tm`\u6307\u5b9a\u7684\u65f6\u95f4\u662f\u5426\u4e3a\u6536\u76d8\u96c6\u5408\u7ade\u4ef7\u65f6\u95f4 Fixme: \u6b64\u5904\u5b9e\u73b0\u6709\u8bef\uff0c\u6536\u76d8\u96c6\u5408\u7ade\u4ef7\u65f6\u95f4\u5e94\u8be5\u8fd8\u5305\u542b\u4e0a\u5348\u6536\u76d8\u65f6\u95f4 Args: tm : [description]. Defaults to None. Returns: bool \"\"\" tm = tm or cls . now () if not cls . is_trade_day ( tm ): return False minutes = tm . hour * 60 + tm . minute return 15 * 60 - 3 <= minutes < 15 * 60 @classmethod def floor ( cls , moment : Frame , frame_type : FrameType ) -> Frame : \"\"\"\u6c42`moment`\u5728\u6307\u5b9a\u7684`frame_type`\u4e2d\u7684\u4e0b\u754c \u6bd4\u5982\uff0c\u5982\u679c`moment`\u4e3a10:37\uff0c\u5219\u5f53`frame_type`\u4e3a30\u5206\u949f\u65f6\uff0c\u5bf9\u5e94\u7684\u4e0a\u754c\u4e3a10:00 Examples: >>> # \u5982\u679cmoment\u4e3a\u65e5\u671f\uff0c\u5219\u5f53\u6210\u5df2\u6536\u76d8\u5904\u7406 >>> TimeFrame.day_frames = np.array([20050104, 20050105, 20050106, 20050107, 20050110, 20050111]) >>> TimeFrame.floor(datetime.date(2005, 1, 7), FrameType.DAY) datetime.date(2005, 1, 7) >>> # moment\u6307\u5b9a\u7684\u65f6\u95f4\u8fd8\u672a\u6536\u76d8\uff0cfloor\u5230\u4e0a\u4e00\u4e2a\u4ea4\u6613\u65e5 >>> TimeFrame.floor(datetime.datetime(2005, 1, 7, 14, 59), FrameType.DAY) datetime.date(2005, 1, 6) >>> TimeFrame.floor(datetime.date(2005, 1, 13), FrameType.WEEK) datetime.date(2005, 1, 7) >>> TimeFrame.floor(datetime.date(2005,2, 27), FrameType.MONTH) datetime.date(2005, 1, 31) >>> TimeFrame.floor(datetime.datetime(2005,1,5,14,59), FrameType.MIN30) datetime.datetime(2005, 1, 5, 14, 30) >>> TimeFrame.floor(datetime.datetime(2005, 1, 5, 14, 59), FrameType.MIN1) datetime.datetime(2005, 1, 5, 14, 59) >>> TimeFrame.floor(arrow.get('2005-1-5 14:59').naive, FrameType.MIN1) datetime.datetime(2005, 1, 5, 14, 59) Args: moment: frame_type: Returns: `moment`\u5728\u6307\u5b9a\u7684`frame_type`\u4e2d\u7684\u4e0b\u754c \"\"\" if frame_type in cls . minute_level_frames : tm , day_offset = cls . minute_frames_floor ( cls . ticks [ frame_type ], moment . hour * 60 + moment . minute ) h , m = tm // 60 , tm % 60 if cls . day_shift ( moment , 0 ) < moment . date () or day_offset == - 1 : h = 15 m = 0 new_day = cls . day_shift ( moment , day_offset ) else : new_day = moment . date () return datetime . datetime ( new_day . year , new_day . month , new_day . day , h , m ) if type ( moment ) == datetime . date : moment = datetime . datetime ( moment . year , moment . month , moment . day , 15 ) # \u5982\u679c\u662f\u4ea4\u6613\u65e5\uff0c\u4f46\u8fd8\u672a\u6536\u76d8 if ( cls . date2int ( moment ) in cls . day_frames and moment . hour * 60 + moment . minute < 900 ): moment = cls . day_shift ( moment , - 1 ) day = cls . date2int ( moment ) if frame_type == FrameType . DAY : arr = cls . day_frames elif frame_type == FrameType . WEEK : arr = cls . week_frames elif frame_type == FrameType . MONTH : arr = cls . month_frames else : # pragma: no cover raise ValueError ( f \"frame type { frame_type } not supported.\" ) floored = ext . floor ( arr , day ) return cls . int2date ( floored ) @classmethod def last_min_frame ( cls , day : Union [ str , Arrow , datetime . 
date ], frame_type : FrameType ) -> Union [ datetime . date , datetime . datetime ]: \"\"\"\u83b7\u53d6`day`\u65e5\u5468\u671f\u4e3a`frame_type`\u7684\u7ed3\u675fframe\u3002 Example: >>> TimeFrame.last_min_frame(arrow.get('2020-1-5').date(), FrameType.MIN30) datetime.datetime(2020, 1, 3, 15, 0) Args: day: frame_type: Returns: `day`\u65e5\u5468\u671f\u4e3a`frame_type`\u7684\u7ed3\u675fframe \"\"\" if isinstance ( day , str ): day = cls . date2int ( arrow . get ( day ) . date ()) elif isinstance ( day , arrow . Arrow ) or isinstance ( day , datetime . datetime ): day = cls . date2int ( day . date ()) elif isinstance ( day , datetime . date ): day = cls . date2int ( day ) else : raise TypeError ( f \" { type ( day ) } is not supported.\" ) if frame_type in cls . minute_level_frames : last_close_day = cls . day_frames [ cls . day_frames <= day ][ - 1 ] day = cls . int2date ( last_close_day ) return datetime . datetime ( day . year , day . month , day . day , hour = 15 , minute = 0 ) else : # pragma: no cover raise ValueError ( f \" { frame_type } not supported\" ) @classmethod def frame_len ( cls , frame_type : FrameType ) -> int : \"\"\"\u8fd4\u56de\u4ee5\u5206\u949f\u4e3a\u5355\u4f4d\u7684frame\u957f\u5ea6\u3002 \u5bf9\u65e5\u7ebf\u4ee5\u4e0a\u7ea7\u522b\u6ca1\u6709\u610f\u4e49\uff0c\u4f46\u4f1a\u8fd4\u56de240 Examples: >>> TimeFrame.frame_len(FrameType.MIN5) 5 Args: frame_type: Returns: \u8fd4\u56de\u4ee5\u5206\u949f\u4e3a\u5355\u4f4d\u7684frame\u957f\u5ea6\u3002 \"\"\" if frame_type == FrameType . MIN1 : return 1 elif frame_type == FrameType . MIN5 : return 5 elif frame_type == FrameType . MIN15 : return 15 elif frame_type == FrameType . MIN30 : return 30 elif frame_type == FrameType . MIN60 : return 60 else : return 240 @classmethod def first_min_frame ( cls , day : Union [ str , Arrow , Frame ], frame_type : FrameType ) -> Union [ datetime . date , datetime . datetime ]: \"\"\"\u83b7\u53d6\u6307\u5b9a\u65e5\u671f\u7c7b\u578b\u4e3a`frame_type`\u7684`frame`\u3002 Examples: >>> TimeFrame.day_frames = np.array([20191227, 20191230, 20191231, 20200102, 20200103]) >>> TimeFrame.first_min_frame('2019-12-31', FrameType.MIN1) datetime.datetime(2019, 12, 31, 9, 31) Args: day: which day? frame_type: which frame_type? Returns: `day`\u5f53\u65e5\u7684\u7b2c\u4e00\u5e27 \"\"\" day = cls . date2int ( arrow . get ( day ) . date ()) if frame_type == FrameType . MIN1 : floor_day = cls . day_frames [ cls . day_frames <= day ][ - 1 ] day = cls . int2date ( floor_day ) return datetime . datetime ( day . year , day . month , day . day , hour = 9 , minute = 31 ) elif frame_type == FrameType . MIN5 : floor_day = cls . day_frames [ cls . day_frames <= day ][ - 1 ] day = cls . int2date ( floor_day ) return datetime . datetime ( day . year , day . month , day . day , hour = 9 , minute = 35 ) elif frame_type == FrameType . MIN15 : floor_day = cls . day_frames [ cls . day_frames <= day ][ - 1 ] day = cls . int2date ( floor_day ) return datetime . datetime ( day . year , day . month , day . day , hour = 9 , minute = 45 ) elif frame_type == FrameType . MIN30 : floor_day = cls . day_frames [ cls . day_frames <= day ][ - 1 ] day = cls . int2date ( floor_day ) return datetime . datetime ( day . year , day . month , day . day , hour = 10 ) elif frame_type == FrameType . MIN60 : floor_day = cls . day_frames [ cls . day_frames <= day ][ - 1 ] day = cls . int2date ( floor_day ) return datetime . datetime ( day . year , day . month , day . 
day , hour = 10 , minute = 30 ) else : # pragma: no cover raise ValueError ( f \" { frame_type } not supported\" ) @classmethod def get_frames ( cls , start : Frame , end : Frame , frame_type : FrameType ) -> List [ int ]: \"\"\"\u53d6[start, end]\u95f4\u6240\u6709\u7c7b\u578b\u4e3aframe_type\u7684frames \u8c03\u7528\u672c\u51fd\u6570\u524d\uff0c\u8bf7\u5148\u901a\u8fc7`floor`\u6216\u8005`ceiling`\u5c06\u65f6\u95f4\u5e27\u5bf9\u9f50\u5230`frame_type`\u7684\u8fb9\u754c\u503c Example: >>> start = arrow.get('2020-1-13 10:00').naive >>> end = arrow.get('2020-1-13 13:30').naive >>> TimeFrame.day_frames = np.array([20200109, 20200110, 20200113,20200114, 20200115, 20200116]) >>> TimeFrame.get_frames(start, end, FrameType.MIN30) [202001131000, 202001131030, 202001131100, 202001131130, 202001131330] Args: start: end: frame_type: Returns: frame list \"\"\" n = cls . count_frames ( start , end , frame_type ) return cls . get_frames_by_count ( end , n , frame_type ) @classmethod def get_frames_by_count ( cls , end : Arrow , n : int , frame_type : FrameType ) -> List [ int ]: \"\"\"\u53d6\u4ee5end\u4e3a\u7ed3\u675f\u70b9,\u5468\u671f\u4e3aframe_type\u7684n\u4e2aframe \u8c03\u7528\u524d\u8bf7\u5c06`end`\u5bf9\u9f50\u5230`frame_type`\u7684\u8fb9\u754c Examples: >>> end = arrow.get('2020-1-6 14:30').naive >>> TimeFrame.day_frames = np.array([20200102, 20200103,20200106, 20200107, 20200108, 20200109]) >>> TimeFrame.get_frames_by_count(end, 2, FrameType.MIN30) [202001061400, 202001061430] Args: end: n: frame_type: Returns: frame list \"\"\" if frame_type == FrameType . DAY : end = cls . date2int ( end ) pos = np . searchsorted ( cls . day_frames , end , side = \"right\" ) return cls . day_frames [ max ( 0 , pos - n ) : pos ] . tolist () elif frame_type == FrameType . WEEK : end = cls . date2int ( end ) pos = np . searchsorted ( cls . week_frames , end , side = \"right\" ) return cls . week_frames [ max ( 0 , pos - n ) : pos ] . tolist () elif frame_type == FrameType . MONTH : end = cls . date2int ( end ) pos = np . searchsorted ( cls . month_frames , end , side = \"right\" ) return cls . month_frames [ max ( 0 , pos - n ) : pos ] . tolist () elif frame_type in { FrameType . MIN1 , FrameType . MIN5 , FrameType . MIN15 , FrameType . MIN30 , FrameType . MIN60 , }: n_days = n // len ( cls . ticks [ frame_type ]) + 2 ticks = cls . ticks [ frame_type ] * n_days days = cls . get_frames_by_count ( end , n_days , FrameType . DAY ) days = np . repeat ( days , len ( cls . ticks [ frame_type ])) ticks = [ day . item () * 10000 + int ( tm / 60 ) * 100 + tm % 60 for day , tm in zip ( days , ticks ) ] # list index is much faster than ext.index_sorted when the arr is small pos = ticks . index ( cls . 
time2int ( end )) + 1 return ticks [ max ( 0 , pos - n ) : pos ] else : # pragma: no cover raise ValueError ( f \" { frame_type } not support yet\" ) @classmethod def ceiling ( cls , moment : Frame , frame_type : FrameType ) -> Frame : \"\"\"\u6c42`moment`\u6240\u5728\u7c7b\u578b\u4e3a`frame_type`\u5468\u671f\u7684\u4e0a\u754c \u6bd4\u5982`moment`\u4e3a14:59\u5206\uff0c\u5982\u679c`frame_type`\u4e3a30\u5206\u949f\uff0c\u5219\u5b83\u7684\u4e0a\u754c\u5e94\u8be5\u4e3a15:00 Example: >>> TimeFrame.day_frames = [20050104, 20050105, 20050106, 20050107] >>> TimeFrame.ceiling(datetime.date(2005, 1, 7), FrameType.DAY) datetime.date(2005, 1, 7) >>> TimeFrame.week_frames = [20050107, 20050114, 20050121, 20050128] >>> TimeFrame.ceiling(datetime.date(2005, 1, 4), FrameType.WEEK) datetime.date(2005, 1, 7) >>> TimeFrame.ceiling(datetime.date(2005,1,7), FrameType.WEEK) datetime.date(2005, 1, 7) >>> TimeFrame.month_frames = [20050131, 20050228] >>> TimeFrame.ceiling(datetime.date(2005,1 ,1), FrameType.MONTH) datetime.date(2005, 1, 31) >>> TimeFrame.ceiling(datetime.datetime(2005,1,5,14,59), FrameType.MIN30) datetime.datetime(2005, 1, 5, 15, 0) >>> TimeFrame.ceiling(datetime.datetime(2005, 1, 5, 14, 59), FrameType.MIN1) datetime.datetime(2005, 1, 5, 14, 59) >>> TimeFrame.ceiling(arrow.get('2005-1-5 14:59').naive, FrameType.MIN1) datetime.datetime(2005, 1, 5, 14, 59) Args: moment (datetime.datetime): [description] frame_type (FrameType): [description] Returns: `moment`\u6240\u5728\u7c7b\u578b\u4e3a`frame_type`\u5468\u671f\u7684\u4e0a\u754c \"\"\" if frame_type in cls . day_level_frames and type ( moment ) == datetime . datetime : moment = moment . date () floor = cls . floor ( moment , frame_type ) if floor == moment : return moment elif floor > moment : return floor else : return cls . shift ( floor , 1 , frame_type ) @classmethod def combine_time ( cls , date : datetime . date , hour : int , minute : int = 0 , second : int = 0 , microsecond : int = 0 , ) -> datetime . datetime : \"\"\"\u7528`date`\u6307\u5b9a\u7684\u65e5\u671f\u4e0e`hour`, `minute`, `second`\u7b49\u53c2\u6570\u4e00\u8d77\u5408\u6210\u65b0\u7684\u65f6\u95f4 Examples: >>> TimeFrame.combine_time(datetime.date(2020, 1, 1), 14, 30) datetime.datetime(2020, 1, 1, 14, 30) Args: date : [description] hour : [description] minute : [description]. Defaults to 0. second : [description]. Defaults to 0. microsecond : [description]. Defaults to 0. Returns: \u5408\u6210\u540e\u7684\u65f6\u95f4 \"\"\" return datetime . datetime ( date . year , date . month , date . day , hour , minute , second , microsecond ) @classmethod def replace_date ( cls , dtm : datetime . datetime , dt : datetime . date ) -> datetime . datetime : \"\"\"\u5c06`dtm`\u53d8\u91cf\u7684\u65e5\u671f\u66f4\u6362\u4e3a`dt`\u6307\u5b9a\u7684\u65e5\u671f Example: >>> TimeFrame.replace_date(arrow.get('2020-1-1 13:49').datetime, datetime.date(2019, 1,1)) datetime.datetime(2019, 1, 1, 13, 49) Args: dtm (datetime.datetime): [description] dt (datetime.date): [description] Returns: \u53d8\u6362\u540e\u7684\u65f6\u95f4 \"\"\" return datetime . datetime ( dt . year , dt . month , dt . day , dtm . hour , dtm . minute , dtm . second , dtm . microsecond ) @classmethod def resample_frames ( cls , trade_days : Iterable [ datetime . 
date ], frame_type : FrameType ) -> List [ int ]: \"\"\"\u5c06\u4ece\u884c\u60c5\u670d\u52a1\u5668\u83b7\u53d6\u7684\u4ea4\u6613\u65e5\u5386\u91cd\u91c7\u6837\uff0c\u751f\u6210\u5468\u5e27\u548c\u6708\u7ebf\u5e27 Args: trade_days (Iterable): [description] frame_type (FrameType): [description] Returns: List[int]: \u91cd\u91c7\u6837\u540e\u7684\u65e5\u671f\u5217\u8868\uff0c\u65e5\u671f\u7528\u6574\u6570\u8868\u793a \"\"\" if frame_type == FrameType . WEEK : weeks = [] last = trade_days [ 0 ] for cur in trade_days : if cur . weekday () < last . weekday () or ( cur - last ) . days >= 7 : weeks . append ( last ) last = cur if weeks [ - 1 ] < last : weeks . append ( last ) return weeks elif frame_type == FrameType . MONTH : months = [] last = trade_days [ 0 ] for cur in trade_days : if cur . day < last . day : months . append ( last ) last = cur months . append ( last ) return months elif frame_type == FrameType . QUARTER : quarters = [] last = trade_days [ 0 ] for cur in trade_days : if last . month % 3 == 0 : if cur . month > last . month or cur . year > last . year : quarters . append ( last ) last = cur quarters . append ( last ) return quarters elif frame_type == FrameType . YEAR : years = [] last = trade_days [ 0 ] for cur in trade_days : if cur . year > last . year : years . append ( last ) last = cur years . append ( last ) return years else : # pragma: no cover raise ValueError ( f \"Unsupported FrameType: { frame_type } \" ) @classmethod def minute_frames_floor ( cls , ticks , moment ) -> Tuple [ int , int ]: \"\"\" \u5bf9\u4e8e\u5206\u949f\u7ea7\u7684frame,\u8fd4\u56de\u5b83\u4eec\u4e0eframe\u523b\u5ea6\u5411\u4e0b\u5bf9\u9f50\u540e\u7684frame\u53ca\u65e5\u671f\u8fdb\u4f4d\u3002\u5982\u679c\u9700\u8981\u5bf9\u9f50\u5230\u4e0a\u4e00\u4e2a\u4ea4\u6613 \u65e5\uff0c\u5219\u8fdb\u4f4d\u4e3a-1\uff0c\u5426\u5219\u4e3a0. Examples: >>> ticks = [600, 630, 660, 690, 810, 840, 870, 900] >>> TimeFrame.minute_frames_floor(ticks, 545) (900, -1) >>> TimeFrame.minute_frames_floor(ticks, 600) (600, 0) >>> TimeFrame.minute_frames_floor(ticks, 605) (600, 0) >>> TimeFrame.minute_frames_floor(ticks, 899) (870, 0) >>> TimeFrame.minute_frames_floor(ticks, 900) (900, 0) >>> TimeFrame.minute_frames_floor(ticks, 905) (900, 0) Args: ticks (np.array or list): frames\u523b\u5ea6 moment (int): \u6574\u6570\u8868\u793a\u7684\u5206\u949f\u6570\uff0c\u6bd4\u5982900\u8868\u793a15\uff1a00 Returns: tuple, the first is the new moment, the second is carry-on \"\"\" if moment < ticks [ 0 ]: return ticks [ - 1 ], - 1 # \u2019right' \u76f8\u5f53\u4e8e ticks <= m index = np . searchsorted ( ticks , moment , side = \"right\" ) return ticks [ index - 1 ], 0 @classmethod async def save_calendar ( cls , trade_days ): # avoid circular import from omicron import cache for ft in [ FrameType . WEEK , FrameType . MONTH , FrameType . QUARTER , FrameType . YEAR ]: days = cls . resample_frames ( trade_days , ft ) frames = [ cls . date2int ( x ) for x in days ] key = f \"calendar: { ft . value } \" pl = cache . security . pipeline () pl . delete ( key ) pl . rpush ( key , * frames ) await pl . execute () frames = [ cls . date2int ( x ) for x in trade_days ] key = f \"calendar: { FrameType . DAY . value } \" pl = cache . security . pipeline () pl . delete ( key ) pl . rpush ( key , * frames ) await pl . execute () @classmethod async def remove_calendar ( cls ): # avoid circular import from omicron import cache for ft in cls . day_level_frames : key = f \"calendar: { ft . value } \" await cache . security . 
delete ( key ) @classmethod def is_bar_closed ( cls , frame : Frame , ft : FrameType ) -> bool : \"\"\"\u5224\u65ad`frame`\u6240\u4ee3\u8868\u7684bar\u662f\u5426\u5df2\u7ecf\u6536\u76d8\uff08\u7ed3\u675f\uff09 \u5982\u679c\u662f\u65e5\u7ebf\uff0cframe\u4e0d\u4e3a\u5f53\u5929\uff0c\u5219\u8ba4\u4e3a\u5df2\u6536\u76d8\uff1b\u6216\u8005\u5f53\u524d\u65f6\u95f4\u5728\u6536\u76d8\u65f6\u95f4\u4e4b\u540e\uff0c\u4e5f\u8ba4\u4e3a\u5df2\u6536\u76d8\u3002 \u5982\u679c\u662f\u5176\u5b83\u5468\u671f\uff0c\u5219\u53ea\u6709\u5f53frame\u6b63\u597d\u5728\u8fb9\u754c\u4e0a\uff0c\u624d\u8ba4\u4e3a\u662f\u5df2\u6536\u76d8\u3002\u8fd9\u91cc\u6709\u4e00\u4e2a\u5047\u8bbe\uff1a\u6211\u4eec\u4e0d\u4f1a\u5728\u5176\u5b83\u5468\u671f\u4e0a\uff0c\u5224\u65ad\u672a\u6765\u7684\u67d0\u4e2aframe\u662f\u5426\u5df2\u7ecf\u6536\u76d8\u3002 Args: frame : bar\u6240\u5904\u7684\u65f6\u95f4\uff0c\u5fc5\u987b\u5c0f\u4e8e\u5f53\u524d\u65f6\u95f4 ft: bar\u6240\u4ee3\u8868\u7684\u5e27\u7c7b\u578b Returns: bool: \u662f\u5426\u5df2\u7ecf\u6536\u76d8 \"\"\" floor = cls . floor ( frame , ft ) now = arrow . now () if ft == FrameType . DAY : return floor < now . date () or now . hour >= 15 else : return floor == frame @classmethod def get_frame_scope ( cls , frame : Frame , ft : FrameType ) -> Tuple [ Frame , Frame ]: # todo: \u51fd\u6570\u7684\u901a\u7528\u6027\u4e0d\u8db3\uff0c\u4f3c\u4e4e\u5e94\u8be5\u653e\u5728\u5177\u4f53\u7684\u4e1a\u52a1\u7c7b\u4e2d\u3002\u5982\u679c\u662f\u901a\u7528\u578b\u7684\u51fd\u6570\uff0c\u53c2\u6570\u4e0d\u5e94\u8be5\u5c40\u9650\u4e8e\u5468\u548c\u6708\u3002 \"\"\"\u5bf9\u4e8e\u7ed9\u5b9a\u7684\u65f6\u95f4\uff0c\u53d6\u6240\u5728\u5468\u7684\u7b2c\u4e00\u5929\u548c\u6700\u540e\u4e00\u5929\uff0c\u6240\u5728\u6708\u7684\u7b2c\u4e00\u5929\u548c\u6700\u540e\u4e00\u5929 Args: frame : \u6307\u5b9a\u7684\u65e5\u671f\uff0cdate\u5bf9\u8c61 ft: \u5e27\u7c7b\u578b\uff0c\u652f\u6301WEEK\u548cMONTH Returns: Tuple[Frame, Frame]: \u5468\u6216\u8005\u6708\u7684\u9996\u672b\u65e5\u671f\uff08date\u5bf9\u8c61\uff09 \"\"\" if frame is None : raise ValueError ( \"frame cannot be None\" ) if ft not in ( FrameType . WEEK , FrameType . MONTH ): raise ValueError ( f \"FrameType only supports WEEK and MONTH: { ft } \" ) if isinstance ( frame , datetime . datetime ): frame = frame . date () if frame < CALENDAR_START : raise ValueError ( f \"cannot be earlier than { CALENDAR_START } : { frame } \" ) # datetime.date(2021, 10, 8)\uff0c\u8fd9\u662f\u4e2a\u7279\u6b8a\u7684\u65e5\u671f if ft == FrameType . WEEK : if frame < datetime . date ( 2005 , 1 , 10 ): return datetime . date ( 2005 , 1 , 4 ), datetime . date ( 2005 , 1 , 7 ) if not cls . is_trade_day ( frame ): # \u975e\u4ea4\u6613\u65e5\u7684\u60c5\u51b5\uff0c\u76f4\u63a5\u56de\u9000\u4e00\u5929 week_day = cls . day_shift ( frame , 0 ) else : week_day = frame w1 = TimeFrame . floor ( week_day , FrameType . WEEK ) if w1 == week_day : # \u672c\u5468\u7684\u6700\u540e\u4e00\u4e2a\u4ea4\u6613\u65e5 week_end = w1 else : week_end = TimeFrame . week_shift ( week_day , 1 ) w0 = TimeFrame . week_shift ( week_end , - 1 ) week_start = TimeFrame . day_shift ( w0 , 1 ) return week_start , week_end if ft == FrameType . MONTH : if frame <= datetime . date ( 2005 , 1 , 31 ): return datetime . date ( 2005 , 1 , 4 ), datetime . date ( 2005 , 1 , 31 ) month_start = frame . replace ( day = 1 ) if not cls . is_trade_day ( month_start ): # \u975e\u4ea4\u6613\u65e5\u7684\u60c5\u51b5\uff0c\u76f4\u63a5\u52a01 month_start = cls . day_shift ( month_start , 1 ) month_end = TimeFrame . 
month_shift ( month_start , 1 ) return month_start , month_end @classmethod def get_previous_trade_day ( cls , now : datetime . date ): \"\"\"\u83b7\u53d6\u4e0a\u4e00\u4e2a\u4ea4\u6613\u65e5 \u5982\u679c\u5f53\u5929\u662f\u5468\u516d\u6216\u8005\u5468\u65e5\uff0c\u8fd4\u56de\u5468\u4e94\uff08\u4ea4\u6613\u65e5\uff09\uff0c\u5982\u679c\u5f53\u5929\u662f\u5468\u4e00\uff0c\u8fd4\u56de\u5468\u4e94\uff0c\u5982\u679c\u5f53\u5929\u662f\u5468\u4e94\uff0c\u8fd4\u56de\u5468\u56db Args: now : \u6307\u5b9a\u7684\u65e5\u671f\uff0cdate\u5bf9\u8c61 Returns: datetime.date: \u4e0a\u4e00\u4e2a\u4ea4\u6613\u65e5 \"\"\" if now == datetime . date ( 2005 , 1 , 4 ): return now if TimeFrame . is_trade_day ( now ): pre_trade_day = TimeFrame . day_shift ( now , - 1 ) else : pre_trade_day = TimeFrame . day_shift ( now , 0 ) return pre_trade_day","title":"TimeFrame"},{"location":"api/timeframe/#omicron.models.timeframe.TimeFrame.ceiling","text":"\u6c42 moment \u6240\u5728\u7c7b\u578b\u4e3a frame_type \u5468\u671f\u7684\u4e0a\u754c \u6bd4\u5982 moment \u4e3a14:59\u5206\uff0c\u5982\u679c frame_type \u4e3a30\u5206\u949f\uff0c\u5219\u5b83\u7684\u4e0a\u754c\u5e94\u8be5\u4e3a15:00 Examples: >>> TimeFrame . day_frames = [ 20050104 , 20050105 , 20050106 , 20050107 ] >>> TimeFrame . ceiling ( datetime . date ( 2005 , 1 , 7 ), FrameType . DAY ) datetime . date ( 2005 , 1 , 7 ) >>> TimeFrame . week_frames = [ 20050107 , 20050114 , 20050121 , 20050128 ] >>> TimeFrame . ceiling ( datetime . date ( 2005 , 1 , 4 ), FrameType . WEEK ) datetime . date ( 2005 , 1 , 7 ) >>> TimeFrame . ceiling ( datetime . date ( 2005 , 1 , 7 ), FrameType . WEEK ) datetime . date ( 2005 , 1 , 7 ) >>> TimeFrame . month_frames = [ 20050131 , 20050228 ] >>> TimeFrame . ceiling ( datetime . date ( 2005 , 1 , 1 ), FrameType . MONTH ) datetime . date ( 2005 , 1 , 31 ) >>> TimeFrame . ceiling ( datetime . datetime ( 2005 , 1 , 5 , 14 , 59 ), FrameType . MIN30 ) datetime . datetime ( 2005 , 1 , 5 , 15 , 0 ) >>> TimeFrame . ceiling ( datetime . datetime ( 2005 , 1 , 5 , 14 , 59 ), FrameType . MIN1 ) datetime . datetime ( 2005 , 1 , 5 , 14 , 59 ) >>> TimeFrame . ceiling ( arrow . get ( '2005-1-5 14:59' ) . naive , FrameType . MIN1 ) datetime . 
datetime ( 2005 , 1 , 5 , 14 , 59 ) Parameters: Name Type Description Default moment datetime.datetime [description] required frame_type FrameType [description] required Returns: Type Description Frame moment \u6240\u5728\u7c7b\u578b\u4e3a frame_type \u5468\u671f\u7684\u4e0a\u754c Source code in omicron/models/timeframe.py @classmethod def ceiling ( cls , moment : Frame , frame_type : FrameType ) -> Frame : \"\"\"\u6c42`moment`\u6240\u5728\u7c7b\u578b\u4e3a`frame_type`\u5468\u671f\u7684\u4e0a\u754c \u6bd4\u5982`moment`\u4e3a14:59\u5206\uff0c\u5982\u679c`frame_type`\u4e3a30\u5206\u949f\uff0c\u5219\u5b83\u7684\u4e0a\u754c\u5e94\u8be5\u4e3a15:00 Example: >>> TimeFrame.day_frames = [20050104, 20050105, 20050106, 20050107] >>> TimeFrame.ceiling(datetime.date(2005, 1, 7), FrameType.DAY) datetime.date(2005, 1, 7) >>> TimeFrame.week_frames = [20050107, 20050114, 20050121, 20050128] >>> TimeFrame.ceiling(datetime.date(2005, 1, 4), FrameType.WEEK) datetime.date(2005, 1, 7) >>> TimeFrame.ceiling(datetime.date(2005,1,7), FrameType.WEEK) datetime.date(2005, 1, 7) >>> TimeFrame.month_frames = [20050131, 20050228] >>> TimeFrame.ceiling(datetime.date(2005,1 ,1), FrameType.MONTH) datetime.date(2005, 1, 31) >>> TimeFrame.ceiling(datetime.datetime(2005,1,5,14,59), FrameType.MIN30) datetime.datetime(2005, 1, 5, 15, 0) >>> TimeFrame.ceiling(datetime.datetime(2005, 1, 5, 14, 59), FrameType.MIN1) datetime.datetime(2005, 1, 5, 14, 59) >>> TimeFrame.ceiling(arrow.get('2005-1-5 14:59').naive, FrameType.MIN1) datetime.datetime(2005, 1, 5, 14, 59) Args: moment (datetime.datetime): [description] frame_type (FrameType): [description] Returns: `moment`\u6240\u5728\u7c7b\u578b\u4e3a`frame_type`\u5468\u671f\u7684\u4e0a\u754c \"\"\" if frame_type in cls . day_level_frames and type ( moment ) == datetime . datetime : moment = moment . date () floor = cls . floor ( moment , frame_type ) if floor == moment : return moment elif floor > moment : return floor else : return cls . shift ( floor , 1 , frame_type )","title":"ceiling()"},{"location":"api/timeframe/#omicron.models.timeframe.TimeFrame.combine_time","text":"\u7528 date \u6307\u5b9a\u7684\u65e5\u671f\u4e0e hour , minute , second \u7b49\u53c2\u6570\u4e00\u8d77\u5408\u6210\u65b0\u7684\u65f6\u95f4 Examples: >>> TimeFrame . combine_time ( datetime . date ( 2020 , 1 , 1 ), 14 , 30 ) datetime . datetime ( 2020 , 1 , 1 , 14 , 30 ) Parameters: Name Type Description Default date [description] required hour [description] required minute [description]. Defaults to 0. 0 second [description]. Defaults to 0. 0 microsecond [description]. Defaults to 0. 0 Returns: Type Description datetime.datetime \u5408\u6210\u540e\u7684\u65f6\u95f4 Source code in omicron/models/timeframe.py @classmethod def combine_time ( cls , date : datetime . date , hour : int , minute : int = 0 , second : int = 0 , microsecond : int = 0 , ) -> datetime . datetime : \"\"\"\u7528`date`\u6307\u5b9a\u7684\u65e5\u671f\u4e0e`hour`, `minute`, `second`\u7b49\u53c2\u6570\u4e00\u8d77\u5408\u6210\u65b0\u7684\u65f6\u95f4 Examples: >>> TimeFrame.combine_time(datetime.date(2020, 1, 1), 14, 30) datetime.datetime(2020, 1, 1, 14, 30) Args: date : [description] hour : [description] minute : [description]. Defaults to 0. second : [description]. Defaults to 0. microsecond : [description]. Defaults to 0. Returns: \u5408\u6210\u540e\u7684\u65f6\u95f4 \"\"\" return datetime . datetime ( date . year , date . month , date . 
day , hour , minute , second , microsecond )","title":"combine_time()"},{"location":"api/timeframe/#omicron.models.timeframe.TimeFrame.count_day_frames","text":"calc trade days between start and end in close-to-close way. if start == end, this will returns 1. Both start/end will be aligned to open trade day before calculation. Examples: >>> start = datetime . date ( 2019 , 12 , 21 ) >>> end = datetime . date ( 2019 , 12 , 21 ) >>> TimeFrame . day_frames = [ 20191219 , 20191220 , 20191223 , 20191224 , 20191225 ] >>> TimeFrame . count_day_frames ( start , end ) 1 >>> # non-trade days are removed >>> TimeFrame . day_frames = [ 20200121 , 20200122 , 20200123 , 20200203 , 20200204 , 20200205 ] >>> start = datetime . date ( 2020 , 1 , 23 ) >>> end = datetime . date ( 2020 , 2 , 4 ) >>> TimeFrame . count_day_frames ( start , end ) 3 Parameters: Name Type Description Default start Union[datetime.date, Arrow] required end Union[datetime.date, Arrow] required Returns: Type Description int count of days Source code in omicron/models/timeframe.py @classmethod def count_day_frames ( cls , start : Union [ datetime . date , Arrow ], end : Union [ datetime . date , Arrow ] ) -> int : \"\"\"calc trade days between start and end in close-to-close way. if start == end, this will returns 1. Both start/end will be aligned to open trade day before calculation. Examples: >>> start = datetime.date(2019, 12, 21) >>> end = datetime.date(2019, 12, 21) >>> TimeFrame.day_frames = [20191219, 20191220, 20191223, 20191224, 20191225] >>> TimeFrame.count_day_frames(start, end) 1 >>> # non-trade days are removed >>> TimeFrame.day_frames = [20200121, 20200122, 20200123, 20200203, 20200204, 20200205] >>> start = datetime.date(2020, 1, 23) >>> end = datetime.date(2020, 2, 4) >>> TimeFrame.count_day_frames(start, end) 3 args: start: end: returns: count of days \"\"\" start = cls . date2int ( start ) end = cls . date2int ( end ) return int ( ext . count_between ( cls . day_frames , start , end ))","title":"count_day_frames()"},{"location":"api/timeframe/#omicron.models.timeframe.TimeFrame.count_frames","text":"\u8ba1\u7b97start\u4e0eend\u4e4b\u95f4\u6709\u591a\u5c11\u4e2a\u5468\u671f\u4e3aframe_type\u7684frames See also: count_day_frames count_week_frames count_month_frames Parameters: Name Type Description Default start start frame required end end frame required frame_type the type of frame required Exceptions: Type Description ValueError \u5982\u679cframe_type\u4e0d\u652f\u6301\uff0c\u5219\u4f1a\u629b\u51fa\u6b64\u5f02\u5e38\u3002 Returns: Type Description int \u4ecestart\u5230end\u7684\u5e27\u6570 Source code in omicron/models/timeframe.py @classmethod def count_frames ( cls , start : Union [ datetime . date , datetime . datetime , Arrow ], end : Union [ datetime . date , datetime . datetime , Arrow ], frame_type , ) -> int : \"\"\"\u8ba1\u7b97start\u4e0eend\u4e4b\u95f4\u6709\u591a\u5c11\u4e2a\u5468\u671f\u4e3aframe_type\u7684frames See also: - [count_day_frames][omicron.models.timeframe.TimeFrame.count_day_frames] - [count_week_frames][omicron.models.timeframe.TimeFrame.count_week_frames] - [count_month_frames][omicron.models.timeframe.TimeFrame.count_month_frames] Args: start : start frame end : end frame frame_type : the type of frame Raises: ValueError: \u5982\u679cframe_type\u4e0d\u652f\u6301\uff0c\u5219\u4f1a\u629b\u51fa\u6b64\u5f02\u5e38\u3002 Returns: \u4ecestart\u5230end\u7684\u5e27\u6570 \"\"\" if frame_type == FrameType . DAY : return cls . count_day_frames ( start , end ) elif frame_type == FrameType . 
WEEK : return cls . count_week_frames ( start , end ) elif frame_type == FrameType . MONTH : return cls . count_month_frames ( start , end ) elif frame_type == FrameType . QUARTER : return cls . count_quarter_frames ( start , end ) elif frame_type == FrameType . YEAR : return cls . count_year_frames ( start , end ) elif frame_type in [ FrameType . MIN1 , FrameType . MIN5 , FrameType . MIN15 , FrameType . MIN30 , FrameType . MIN60 , ]: tm_start = start . hour * 60 + start . minute tm_end = end . hour * 60 + end . minute days = cls . count_day_frames ( start . date (), end . date ()) - 1 tm_start_pos = cls . ticks [ frame_type ] . index ( tm_start ) tm_end_pos = cls . ticks [ frame_type ] . index ( tm_end ) min_bars = tm_end_pos - tm_start_pos + 1 return days * len ( cls . ticks [ frame_type ]) + min_bars else : # pragma: no cover raise ValueError ( f \" { frame_type } is not supported yet\" )","title":"count_frames()"},{"location":"api/timeframe/#omicron.models.timeframe.TimeFrame.count_month_frames","text":"calc trade months between start and end date in close-to-close way Both start and end will be aligned to open trade day before calculation. After that, if start == end, this will returns 1. For examples, please refer to count_day_frames Parameters: Name Type Description Default start datetime.date required end datetime.date required Returns: Type Description int months between start and end Source code in omicron/models/timeframe.py @classmethod def count_month_frames ( cls , start : datetime . date , end : datetime . date ) -> int : \"\"\"calc trade months between start and end date in close-to-close way Both start and end will be aligned to open trade day before calculation. After that, if start == end, this will returns 1. For examples, please refer to [count_day_frames][omicron.models.timeframe.TimeFrame.count_day_frames] Args: start: end: Returns: months between start and end \"\"\" start = cls . date2int ( start ) end = cls . date2int ( end ) return int ( ext . count_between ( cls . month_frames , start , end ))","title":"count_month_frames()"},{"location":"api/timeframe/#omicron.models.timeframe.TimeFrame.count_quarter_frames","text":"calc trade quarters between start and end date in close-to-close way Both start and end will be aligned to open trade day before calculation. After that, if start == end, this will returns 1. For examples, please refer to count_day_frames Parameters: Name Type Description Default start datetime.date [description] required end datetime.date [description] required Returns: Type Description int quarters between start and end Source code in omicron/models/timeframe.py @classmethod def count_quarter_frames ( cls , start : datetime . date , end : datetime . date ) -> int : \"\"\"calc trade quarters between start and end date in close-to-close way Both start and end will be aligned to open trade day before calculation. After that, if start == end, this will returns 1. For examples, please refer to [count_day_frames][omicron.models.timeframe.TimeFrame.count_day_frames] Args: start (datetime.date): [description] end (datetime.date): [description] Returns: quarters between start and end \"\"\" start = cls . date2int ( start ) end = cls . date2int ( end ) return int ( ext . count_between ( cls . quarter_frames , start , end ))","title":"count_quarter_frames()"},{"location":"api/timeframe/#omicron.models.timeframe.TimeFrame.count_week_frames","text":"calc trade weeks between start and end in close-to-close way. 
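[Editorial note] The count_*_frames entries documented here all follow the same close-to-close convention: both endpoints are first aligned to the trade calendar, and a range whose aligned endpoints coincide counts as 1. The sketch below is an illustration only; it stubs `day_frames`/`week_frames` with the same values used in the doctests above instead of loading the real calendar via `await TimeFrame.init()`.

# Minimal sketch of the close-to-close counting convention; the calendar arrays
# are stubbed with the doctest values rather than loaded via TimeFrame.init().
import datetime
from omicron.models.timeframe import TimeFrame

TimeFrame.day_frames = [20200121, 20200122, 20200123, 20200203, 20200204, 20200205]
TimeFrame.week_frames = [20050107, 20050114, 20050121, 20050128]

# 2020-01-23 .. 2020-02-04 covers the frames 20200123, 20200203, 20200204 -> 3
print(TimeFrame.count_day_frames(datetime.date(2020, 1, 23), datetime.date(2020, 2, 4)))

# four weekly frames (01-07, 01-14, 01-21, 01-28) fall inside the range -> 4
print(TimeFrame.count_week_frames(datetime.date(2005, 1, 7), datetime.date(2005, 1, 28)))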
Both start and end will be aligned to open trade day before calculation. After that, if start == end, this will returns 1 for examples, please refer to count_day_frames Parameters: Name Type Description Default start datetime.date required end datetime.date required Returns: Type Description int count of weeks Source code in omicron/models/timeframe.py @classmethod def count_week_frames ( cls , start : datetime . date , end : datetime . date ) -> int : \"\"\" calc trade weeks between start and end in close-to-close way. Both start and end will be aligned to open trade day before calculation. After that, if start == end, this will returns 1 for examples, please refer to [count_day_frames][omicron.models.timeframe.TimeFrame.count_day_frames] args: start: end: returns: count of weeks \"\"\" start = cls . date2int ( start ) end = cls . date2int ( end ) return int ( ext . count_between ( cls . week_frames , start , end ))","title":"count_week_frames()"},{"location":"api/timeframe/#omicron.models.timeframe.TimeFrame.count_year_frames","text":"calc trade years between start and end date in close-to-close way Both start and end will be aligned to open trade day before calculation. After that, if start == end, this will returns 1. For examples, please refer to count_day_frames Parameters: Name Type Description Default start datetime.date [description] required end datetime.date [description] required Returns: Type Description int years between start and end Source code in omicron/models/timeframe.py @classmethod def count_year_frames ( cls , start : datetime . date , end : datetime . date ) -> int : \"\"\"calc trade years between start and end date in close-to-close way Both start and end will be aligned to open trade day before calculation. After that, if start == end, this will returns 1. For examples, please refer to [count_day_frames][omicron.models.timeframe.TimeFrame.count_day_frames] Args: start (datetime.date): [description] end (datetime.date): [description] Returns: years between start and end \"\"\" start = cls . date2int ( start ) end = cls . date2int ( end ) return int ( ext . count_between ( cls . year_frames , start , end ))","title":"count_year_frames()"},{"location":"api/timeframe/#omicron.models.timeframe.TimeFrame.date2int","text":"\u5c06\u65e5\u671f\u8f6c\u6362\u4e3a\u6574\u6570\u8868\u793a \u5728zillionare\u4e2d\uff0c\u5982\u679c\u8981\u5bf9\u65f6\u95f4\u548c\u65e5\u671f\u8fdb\u884c\u6301\u4e45\u5316\u64cd\u4f5c\uff0c\u6211\u4eec\u4e00\u822c\u5c06\u5176\u8f6c\u6362\u4e3aint\u7c7b\u578b Examples: >>> TimeFrame . date2int ( datetime . date ( 2020 , 5 , 1 )) 20200501 Parameters: Name Type Description Default d Union[datetime.datetime, datetime.date, Arrow] date required Returns: Type Description int \u65e5\u671f\u7684\u6574\u6570\u8868\u793a\uff0c\u6bd4\u598220220211 Source code in omicron/models/timeframe.py @classmethod def date2int ( cls , d : Union [ datetime . datetime , datetime . date , Arrow ]) -> int : \"\"\"\u5c06\u65e5\u671f\u8f6c\u6362\u4e3a\u6574\u6570\u8868\u793a \u5728zillionare\u4e2d\uff0c\u5982\u679c\u8981\u5bf9\u65f6\u95f4\u548c\u65e5\u671f\u8fdb\u884c\u6301\u4e45\u5316\u64cd\u4f5c\uff0c\u6211\u4eec\u4e00\u822c\u5c06\u5176\u8f6c\u6362\u4e3aint\u7c7b\u578b Examples: >>> TimeFrame.date2int(datetime.date(2020,5,1)) 20200501 Args: d: date Returns: \u65e5\u671f\u7684\u6574\u6570\u8868\u793a\uff0c\u6bd4\u598220220211 \"\"\" return int ( f \" { d . year : 04 }{ d . month : 02 }{ d . 
day : 02 } \" )","title":"date2int()"},{"location":"api/timeframe/#omicron.models.timeframe.TimeFrame.day_shift","text":"\u5bf9\u6307\u5b9a\u65e5\u671f\u8fdb\u884c\u524d\u540e\u79fb\u4f4d\u64cd\u4f5c \u5982\u679c n == 0\uff0c\u5219\u8fd4\u56ded\u5bf9\u5e94\u7684\u4ea4\u6613\u65e5\uff08\u5982\u679c\u662f\u975e\u4ea4\u6613\u65e5\uff0c\u5219\u8fd4\u56de\u521a\u7ed3\u675f\u7684\u4e00\u4e2a\u4ea4\u6613\u65e5\uff09 \u5982\u679c n > 0\uff0c\u5219\u8fd4\u56ded\u5bf9\u5e94\u7684\u4ea4\u6613\u65e5\u540e\u7b2c n \u4e2a\u4ea4\u6613\u65e5 \u5982\u679c n < 0\uff0c\u5219\u8fd4\u56ded\u5bf9\u5e94\u7684\u4ea4\u6613\u65e5\u524d\u7b2c n \u4e2a\u4ea4\u6613\u65e5 Examples: >>> TimeFrame . day_frames = [ 20191212 , 20191213 , 20191216 , 20191217 , 20191218 , 20191219 ] >>> TimeFrame . day_shift ( datetime . date ( 2019 , 12 , 13 ), 0 ) datetime . date ( 2019 , 12 , 13 ) >>> TimeFrame . day_shift ( datetime . date ( 2019 , 12 , 15 ), 0 ) datetime . date ( 2019 , 12 , 13 ) >>> TimeFrame . day_shift ( datetime . date ( 2019 , 12 , 15 ), 1 ) datetime . date ( 2019 , 12 , 16 ) >>> TimeFrame . day_shift ( datetime . date ( 2019 , 12 , 13 ), 1 ) datetime . date ( 2019 , 12 , 16 ) Parameters: Name Type Description Default start datetime.date the origin day required offset int days to shift, can be negative required Returns: Type Description datetime.date \u79fb\u4f4d\u540e\u7684\u65e5\u671f Source code in omicron/models/timeframe.py @classmethod def day_shift ( cls , start : datetime . date , offset : int ) -> datetime . date : \"\"\"\u5bf9\u6307\u5b9a\u65e5\u671f\u8fdb\u884c\u524d\u540e\u79fb\u4f4d\u64cd\u4f5c \u5982\u679c n == 0\uff0c\u5219\u8fd4\u56ded\u5bf9\u5e94\u7684\u4ea4\u6613\u65e5\uff08\u5982\u679c\u662f\u975e\u4ea4\u6613\u65e5\uff0c\u5219\u8fd4\u56de\u521a\u7ed3\u675f\u7684\u4e00\u4e2a\u4ea4\u6613\u65e5\uff09 \u5982\u679c n > 0\uff0c\u5219\u8fd4\u56ded\u5bf9\u5e94\u7684\u4ea4\u6613\u65e5\u540e\u7b2c n \u4e2a\u4ea4\u6613\u65e5 \u5982\u679c n < 0\uff0c\u5219\u8fd4\u56ded\u5bf9\u5e94\u7684\u4ea4\u6613\u65e5\u524d\u7b2c n \u4e2a\u4ea4\u6613\u65e5 Examples: >>> TimeFrame.day_frames = [20191212, 20191213, 20191216, 20191217,20191218, 20191219] >>> TimeFrame.day_shift(datetime.date(2019,12,13), 0) datetime.date(2019, 12, 13) >>> TimeFrame.day_shift(datetime.date(2019, 12, 15), 0) datetime.date(2019, 12, 13) >>> TimeFrame.day_shift(datetime.date(2019, 12, 15), 1) datetime.date(2019, 12, 16) >>> TimeFrame.day_shift(datetime.date(2019, 12, 13), 1) datetime.date(2019, 12, 16) Args: start: the origin day offset: days to shift, can be negative Returns: \u79fb\u4f4d\u540e\u7684\u65e5\u671f \"\"\" # accelerated from 0.12 to 0.07, per 10000 loop, type conversion time included start = cls . date2int ( start ) return cls . int2date ( ext . shift ( cls . day_frames , start , offset ))","title":"day_shift()"},{"location":"api/timeframe/#omicron.models.timeframe.TimeFrame.first_min_frame","text":"\u83b7\u53d6\u6307\u5b9a\u65e5\u671f\u7c7b\u578b\u4e3a frame_type \u7684 frame \u3002 Examples: >>> TimeFrame . day_frames = np . array ([ 20191227 , 20191230 , 20191231 , 20200102 , 20200103 ]) >>> TimeFrame . first_min_frame ( '2019-12-31' , FrameType . MIN1 ) datetime . datetime ( 2019 , 12 , 31 , 9 , 31 ) Parameters: Name Type Description Default day Union[str, Arrow, Frame] which day? required frame_type FrameType which frame_type? 
required Returns: Type Description Union[datetime.date, datetime.datetime] day \u5f53\u65e5\u7684\u7b2c\u4e00\u5e27 Source code in omicron/models/timeframe.py @classmethod def first_min_frame ( cls , day : Union [ str , Arrow , Frame ], frame_type : FrameType ) -> Union [ datetime . date , datetime . datetime ]: \"\"\"\u83b7\u53d6\u6307\u5b9a\u65e5\u671f\u7c7b\u578b\u4e3a`frame_type`\u7684`frame`\u3002 Examples: >>> TimeFrame.day_frames = np.array([20191227, 20191230, 20191231, 20200102, 20200103]) >>> TimeFrame.first_min_frame('2019-12-31', FrameType.MIN1) datetime.datetime(2019, 12, 31, 9, 31) Args: day: which day? frame_type: which frame_type? Returns: `day`\u5f53\u65e5\u7684\u7b2c\u4e00\u5e27 \"\"\" day = cls . date2int ( arrow . get ( day ) . date ()) if frame_type == FrameType . MIN1 : floor_day = cls . day_frames [ cls . day_frames <= day ][ - 1 ] day = cls . int2date ( floor_day ) return datetime . datetime ( day . year , day . month , day . day , hour = 9 , minute = 31 ) elif frame_type == FrameType . MIN5 : floor_day = cls . day_frames [ cls . day_frames <= day ][ - 1 ] day = cls . int2date ( floor_day ) return datetime . datetime ( day . year , day . month , day . day , hour = 9 , minute = 35 ) elif frame_type == FrameType . MIN15 : floor_day = cls . day_frames [ cls . day_frames <= day ][ - 1 ] day = cls . int2date ( floor_day ) return datetime . datetime ( day . year , day . month , day . day , hour = 9 , minute = 45 ) elif frame_type == FrameType . MIN30 : floor_day = cls . day_frames [ cls . day_frames <= day ][ - 1 ] day = cls . int2date ( floor_day ) return datetime . datetime ( day . year , day . month , day . day , hour = 10 ) elif frame_type == FrameType . MIN60 : floor_day = cls . day_frames [ cls . day_frames <= day ][ - 1 ] day = cls . int2date ( floor_day ) return datetime . datetime ( day . year , day . month , day . day , hour = 10 , minute = 30 ) else : # pragma: no cover raise ValueError ( f \" { frame_type } not supported\" )","title":"first_min_frame()"},{"location":"api/timeframe/#omicron.models.timeframe.TimeFrame.floor","text":"\u6c42 moment \u5728\u6307\u5b9a\u7684 frame_type \u4e2d\u7684\u4e0b\u754c \u6bd4\u5982\uff0c\u5982\u679c moment \u4e3a10:37\uff0c\u5219\u5f53 frame_type \u4e3a30\u5206\u949f\u65f6\uff0c\u5bf9\u5e94\u7684\u4e0a\u754c\u4e3a10:00 Examples: >>> # \u5982\u679cmoment\u4e3a\u65e5\u671f\uff0c\u5219\u5f53\u6210\u5df2\u6536\u76d8\u5904\u7406 >>> TimeFrame . day_frames = np . array ([ 20050104 , 20050105 , 20050106 , 20050107 , 20050110 , 20050111 ]) >>> TimeFrame . floor ( datetime . date ( 2005 , 1 , 7 ), FrameType . DAY ) datetime . date ( 2005 , 1 , 7 ) >>> # moment\u6307\u5b9a\u7684\u65f6\u95f4\u8fd8\u672a\u6536\u76d8\uff0cfloor\u5230\u4e0a\u4e00\u4e2a\u4ea4\u6613\u65e5 >>> TimeFrame . floor ( datetime . datetime ( 2005 , 1 , 7 , 14 , 59 ), FrameType . DAY ) datetime . date ( 2005 , 1 , 6 ) >>> TimeFrame . floor ( datetime . date ( 2005 , 1 , 13 ), FrameType . WEEK ) datetime . date ( 2005 , 1 , 7 ) >>> TimeFrame . floor ( datetime . date ( 2005 , 2 , 27 ), FrameType . MONTH ) datetime . date ( 2005 , 1 , 31 ) >>> TimeFrame . floor ( datetime . datetime ( 2005 , 1 , 5 , 14 , 59 ), FrameType . MIN30 ) datetime . datetime ( 2005 , 1 , 5 , 14 , 30 ) >>> TimeFrame . floor ( datetime . datetime ( 2005 , 1 , 5 , 14 , 59 ), FrameType . MIN1 ) datetime . datetime ( 2005 , 1 , 5 , 14 , 59 ) >>> TimeFrame . floor ( arrow . get ( '2005-1-5 14:59' ) . naive , FrameType . MIN1 ) datetime . 
datetime ( 2005 , 1 , 5 , 14 , 59 ) Parameters: Name Type Description Default moment Frame required frame_type FrameType required Returns: Type Description Frame moment \u5728\u6307\u5b9a\u7684 frame_type \u4e2d\u7684\u4e0b\u754c Source code in omicron/models/timeframe.py @classmethod def floor ( cls , moment : Frame , frame_type : FrameType ) -> Frame : \"\"\"\u6c42`moment`\u5728\u6307\u5b9a\u7684`frame_type`\u4e2d\u7684\u4e0b\u754c \u6bd4\u5982\uff0c\u5982\u679c`moment`\u4e3a10:37\uff0c\u5219\u5f53`frame_type`\u4e3a30\u5206\u949f\u65f6\uff0c\u5bf9\u5e94\u7684\u4e0a\u754c\u4e3a10:00 Examples: >>> # \u5982\u679cmoment\u4e3a\u65e5\u671f\uff0c\u5219\u5f53\u6210\u5df2\u6536\u76d8\u5904\u7406 >>> TimeFrame.day_frames = np.array([20050104, 20050105, 20050106, 20050107, 20050110, 20050111]) >>> TimeFrame.floor(datetime.date(2005, 1, 7), FrameType.DAY) datetime.date(2005, 1, 7) >>> # moment\u6307\u5b9a\u7684\u65f6\u95f4\u8fd8\u672a\u6536\u76d8\uff0cfloor\u5230\u4e0a\u4e00\u4e2a\u4ea4\u6613\u65e5 >>> TimeFrame.floor(datetime.datetime(2005, 1, 7, 14, 59), FrameType.DAY) datetime.date(2005, 1, 6) >>> TimeFrame.floor(datetime.date(2005, 1, 13), FrameType.WEEK) datetime.date(2005, 1, 7) >>> TimeFrame.floor(datetime.date(2005,2, 27), FrameType.MONTH) datetime.date(2005, 1, 31) >>> TimeFrame.floor(datetime.datetime(2005,1,5,14,59), FrameType.MIN30) datetime.datetime(2005, 1, 5, 14, 30) >>> TimeFrame.floor(datetime.datetime(2005, 1, 5, 14, 59), FrameType.MIN1) datetime.datetime(2005, 1, 5, 14, 59) >>> TimeFrame.floor(arrow.get('2005-1-5 14:59').naive, FrameType.MIN1) datetime.datetime(2005, 1, 5, 14, 59) Args: moment: frame_type: Returns: `moment`\u5728\u6307\u5b9a\u7684`frame_type`\u4e2d\u7684\u4e0b\u754c \"\"\" if frame_type in cls . minute_level_frames : tm , day_offset = cls . minute_frames_floor ( cls . ticks [ frame_type ], moment . hour * 60 + moment . minute ) h , m = tm // 60 , tm % 60 if cls . day_shift ( moment , 0 ) < moment . date () or day_offset == - 1 : h = 15 m = 0 new_day = cls . day_shift ( moment , day_offset ) else : new_day = moment . date () return datetime . datetime ( new_day . year , new_day . month , new_day . day , h , m ) if type ( moment ) == datetime . date : moment = datetime . datetime ( moment . year , moment . month , moment . day , 15 ) # \u5982\u679c\u662f\u4ea4\u6613\u65e5\uff0c\u4f46\u8fd8\u672a\u6536\u76d8 if ( cls . date2int ( moment ) in cls . day_frames and moment . hour * 60 + moment . minute < 900 ): moment = cls . day_shift ( moment , - 1 ) day = cls . date2int ( moment ) if frame_type == FrameType . DAY : arr = cls . day_frames elif frame_type == FrameType . WEEK : arr = cls . week_frames elif frame_type == FrameType . MONTH : arr = cls . month_frames else : # pragma: no cover raise ValueError ( f \"frame type { frame_type } not supported.\" ) floored = ext . floor ( arr , day ) return cls . int2date ( floored )","title":"floor()"},{"location":"api/timeframe/#omicron.models.timeframe.TimeFrame.frame_len","text":"\u8fd4\u56de\u4ee5\u5206\u949f\u4e3a\u5355\u4f4d\u7684frame\u957f\u5ea6\u3002 \u5bf9\u65e5\u7ebf\u4ee5\u4e0a\u7ea7\u522b\u6ca1\u6709\u610f\u4e49\uff0c\u4f46\u4f1a\u8fd4\u56de240 Examples: >>> TimeFrame . frame_len ( FrameType . 
MIN5 ) 5 Parameters: Name Type Description Default frame_type FrameType required Returns: Type Description int \u8fd4\u56de\u4ee5\u5206\u949f\u4e3a\u5355\u4f4d\u7684frame\u957f\u5ea6\u3002 Source code in omicron/models/timeframe.py @classmethod def frame_len ( cls , frame_type : FrameType ) -> int : \"\"\"\u8fd4\u56de\u4ee5\u5206\u949f\u4e3a\u5355\u4f4d\u7684frame\u957f\u5ea6\u3002 \u5bf9\u65e5\u7ebf\u4ee5\u4e0a\u7ea7\u522b\u6ca1\u6709\u610f\u4e49\uff0c\u4f46\u4f1a\u8fd4\u56de240 Examples: >>> TimeFrame.frame_len(FrameType.MIN5) 5 Args: frame_type: Returns: \u8fd4\u56de\u4ee5\u5206\u949f\u4e3a\u5355\u4f4d\u7684frame\u957f\u5ea6\u3002 \"\"\" if frame_type == FrameType . MIN1 : return 1 elif frame_type == FrameType . MIN5 : return 5 elif frame_type == FrameType . MIN15 : return 15 elif frame_type == FrameType . MIN30 : return 30 elif frame_type == FrameType . MIN60 : return 60 else : return 240","title":"frame_len()"},{"location":"api/timeframe/#omicron.models.timeframe.TimeFrame.get_frame_scope","text":"\u5bf9\u4e8e\u7ed9\u5b9a\u7684\u65f6\u95f4\uff0c\u53d6\u6240\u5728\u5468\u7684\u7b2c\u4e00\u5929\u548c\u6700\u540e\u4e00\u5929\uff0c\u6240\u5728\u6708\u7684\u7b2c\u4e00\u5929\u548c\u6700\u540e\u4e00\u5929 Parameters: Name Type Description Default frame \u6307\u5b9a\u7684\u65e5\u671f\uff0cdate\u5bf9\u8c61 required ft FrameType \u5e27\u7c7b\u578b\uff0c\u652f\u6301WEEK\u548cMONTH required Returns: Type Description Tuple[Frame, Frame] \u5468\u6216\u8005\u6708\u7684\u9996\u672b\u65e5\u671f\uff08date\u5bf9\u8c61\uff09 Source code in omicron/models/timeframe.py @classmethod def get_frame_scope ( cls , frame : Frame , ft : FrameType ) -> Tuple [ Frame , Frame ]: # todo: \u51fd\u6570\u7684\u901a\u7528\u6027\u4e0d\u8db3\uff0c\u4f3c\u4e4e\u5e94\u8be5\u653e\u5728\u5177\u4f53\u7684\u4e1a\u52a1\u7c7b\u4e2d\u3002\u5982\u679c\u662f\u901a\u7528\u578b\u7684\u51fd\u6570\uff0c\u53c2\u6570\u4e0d\u5e94\u8be5\u5c40\u9650\u4e8e\u5468\u548c\u6708\u3002 \"\"\"\u5bf9\u4e8e\u7ed9\u5b9a\u7684\u65f6\u95f4\uff0c\u53d6\u6240\u5728\u5468\u7684\u7b2c\u4e00\u5929\u548c\u6700\u540e\u4e00\u5929\uff0c\u6240\u5728\u6708\u7684\u7b2c\u4e00\u5929\u548c\u6700\u540e\u4e00\u5929 Args: frame : \u6307\u5b9a\u7684\u65e5\u671f\uff0cdate\u5bf9\u8c61 ft: \u5e27\u7c7b\u578b\uff0c\u652f\u6301WEEK\u548cMONTH Returns: Tuple[Frame, Frame]: \u5468\u6216\u8005\u6708\u7684\u9996\u672b\u65e5\u671f\uff08date\u5bf9\u8c61\uff09 \"\"\" if frame is None : raise ValueError ( \"frame cannot be None\" ) if ft not in ( FrameType . WEEK , FrameType . MONTH ): raise ValueError ( f \"FrameType only supports WEEK and MONTH: { ft } \" ) if isinstance ( frame , datetime . datetime ): frame = frame . date () if frame < CALENDAR_START : raise ValueError ( f \"cannot be earlier than { CALENDAR_START } : { frame } \" ) # datetime.date(2021, 10, 8)\uff0c\u8fd9\u662f\u4e2a\u7279\u6b8a\u7684\u65e5\u671f if ft == FrameType . WEEK : if frame < datetime . date ( 2005 , 1 , 10 ): return datetime . date ( 2005 , 1 , 4 ), datetime . date ( 2005 , 1 , 7 ) if not cls . is_trade_day ( frame ): # \u975e\u4ea4\u6613\u65e5\u7684\u60c5\u51b5\uff0c\u76f4\u63a5\u56de\u9000\u4e00\u5929 week_day = cls . day_shift ( frame , 0 ) else : week_day = frame w1 = TimeFrame . floor ( week_day , FrameType . WEEK ) if w1 == week_day : # \u672c\u5468\u7684\u6700\u540e\u4e00\u4e2a\u4ea4\u6613\u65e5 week_end = w1 else : week_end = TimeFrame . week_shift ( week_day , 1 ) w0 = TimeFrame . week_shift ( week_end , - 1 ) week_start = TimeFrame . 
day_shift ( w0 , 1 ) return week_start , week_end if ft == FrameType . MONTH : if frame <= datetime . date ( 2005 , 1 , 31 ): return datetime . date ( 2005 , 1 , 4 ), datetime . date ( 2005 , 1 , 31 ) month_start = frame . replace ( day = 1 ) if not cls . is_trade_day ( month_start ): # \u975e\u4ea4\u6613\u65e5\u7684\u60c5\u51b5\uff0c\u76f4\u63a5\u52a01 month_start = cls . day_shift ( month_start , 1 ) month_end = TimeFrame . month_shift ( month_start , 1 ) return month_start , month_end","title":"get_frame_scope()"},{"location":"api/timeframe/#omicron.models.timeframe.TimeFrame.get_frames","text":"\u53d6[start, end]\u95f4\u6240\u6709\u7c7b\u578b\u4e3aframe_type\u7684frames \u8c03\u7528\u672c\u51fd\u6570\u524d\uff0c\u8bf7\u5148\u901a\u8fc7 floor \u6216\u8005 ceiling \u5c06\u65f6\u95f4\u5e27\u5bf9\u9f50\u5230 frame_type \u7684\u8fb9\u754c\u503c Examples: >>> start = arrow . get ( '2020-1-13 10:00' ) . naive >>> end = arrow . get ( '2020-1-13 13:30' ) . naive >>> TimeFrame . day_frames = np . array ([ 20200109 , 20200110 , 20200113 , 20200114 , 20200115 , 20200116 ]) >>> TimeFrame . get_frames ( start , end , FrameType . MIN30 ) [ 202001131000 , 202001131030 , 202001131100 , 202001131130 , 202001131330 ] Parameters: Name Type Description Default start Frame required end Frame required frame_type FrameType required Returns: Type Description List[int] frame list Source code in omicron/models/timeframe.py @classmethod def get_frames ( cls , start : Frame , end : Frame , frame_type : FrameType ) -> List [ int ]: \"\"\"\u53d6[start, end]\u95f4\u6240\u6709\u7c7b\u578b\u4e3aframe_type\u7684frames \u8c03\u7528\u672c\u51fd\u6570\u524d\uff0c\u8bf7\u5148\u901a\u8fc7`floor`\u6216\u8005`ceiling`\u5c06\u65f6\u95f4\u5e27\u5bf9\u9f50\u5230`frame_type`\u7684\u8fb9\u754c\u503c Example: >>> start = arrow.get('2020-1-13 10:00').naive >>> end = arrow.get('2020-1-13 13:30').naive >>> TimeFrame.day_frames = np.array([20200109, 20200110, 20200113,20200114, 20200115, 20200116]) >>> TimeFrame.get_frames(start, end, FrameType.MIN30) [202001131000, 202001131030, 202001131100, 202001131130, 202001131330] Args: start: end: frame_type: Returns: frame list \"\"\" n = cls . count_frames ( start , end , frame_type ) return cls . get_frames_by_count ( end , n , frame_type )","title":"get_frames()"},{"location":"api/timeframe/#omicron.models.timeframe.TimeFrame.get_frames_by_count","text":"\u53d6\u4ee5end\u4e3a\u7ed3\u675f\u70b9,\u5468\u671f\u4e3aframe_type\u7684n\u4e2aframe \u8c03\u7528\u524d\u8bf7\u5c06 end \u5bf9\u9f50\u5230 frame_type \u7684\u8fb9\u754c Examples: >>> end = arrow . get ( '2020-1-6 14:30' ) . naive >>> TimeFrame . day_frames = np . array ([ 20200102 , 20200103 , 20200106 , 20200107 , 20200108 , 20200109 ]) >>> TimeFrame . get_frames_by_count ( end , 2 , FrameType . 
MIN30 ) [ 202001061400 , 202001061430 ] Parameters: Name Type Description Default end Arrow required n int required frame_type FrameType required Returns: Type Description List[int] frame list Source code in omicron/models/timeframe.py @classmethod def get_frames_by_count ( cls , end : Arrow , n : int , frame_type : FrameType ) -> List [ int ]: \"\"\"\u53d6\u4ee5end\u4e3a\u7ed3\u675f\u70b9,\u5468\u671f\u4e3aframe_type\u7684n\u4e2aframe \u8c03\u7528\u524d\u8bf7\u5c06`end`\u5bf9\u9f50\u5230`frame_type`\u7684\u8fb9\u754c Examples: >>> end = arrow.get('2020-1-6 14:30').naive >>> TimeFrame.day_frames = np.array([20200102, 20200103,20200106, 20200107, 20200108, 20200109]) >>> TimeFrame.get_frames_by_count(end, 2, FrameType.MIN30) [202001061400, 202001061430] Args: end: n: frame_type: Returns: frame list \"\"\" if frame_type == FrameType . DAY : end = cls . date2int ( end ) pos = np . searchsorted ( cls . day_frames , end , side = \"right\" ) return cls . day_frames [ max ( 0 , pos - n ) : pos ] . tolist () elif frame_type == FrameType . WEEK : end = cls . date2int ( end ) pos = np . searchsorted ( cls . week_frames , end , side = \"right\" ) return cls . week_frames [ max ( 0 , pos - n ) : pos ] . tolist () elif frame_type == FrameType . MONTH : end = cls . date2int ( end ) pos = np . searchsorted ( cls . month_frames , end , side = \"right\" ) return cls . month_frames [ max ( 0 , pos - n ) : pos ] . tolist () elif frame_type in { FrameType . MIN1 , FrameType . MIN5 , FrameType . MIN15 , FrameType . MIN30 , FrameType . MIN60 , }: n_days = n // len ( cls . ticks [ frame_type ]) + 2 ticks = cls . ticks [ frame_type ] * n_days days = cls . get_frames_by_count ( end , n_days , FrameType . DAY ) days = np . repeat ( days , len ( cls . ticks [ frame_type ])) ticks = [ day . item () * 10000 + int ( tm / 60 ) * 100 + tm % 60 for day , tm in zip ( days , ticks ) ] # list index is much faster than ext.index_sorted when the arr is small pos = ticks . index ( cls . time2int ( end )) + 1 return ticks [ max ( 0 , pos - n ) : pos ] else : # pragma: no cover raise ValueError ( f \" { frame_type } not support yet\" )","title":"get_frames_by_count()"},{"location":"api/timeframe/#omicron.models.timeframe.TimeFrame.get_previous_trade_day","text":"\u83b7\u53d6\u4e0a\u4e00\u4e2a\u4ea4\u6613\u65e5 \u5982\u679c\u5f53\u5929\u662f\u5468\u516d\u6216\u8005\u5468\u65e5\uff0c\u8fd4\u56de\u5468\u4e94\uff08\u4ea4\u6613\u65e5\uff09\uff0c\u5982\u679c\u5f53\u5929\u662f\u5468\u4e00\uff0c\u8fd4\u56de\u5468\u4e94\uff0c\u5982\u679c\u5f53\u5929\u662f\u5468\u4e94\uff0c\u8fd4\u56de\u5468\u56db Parameters: Name Type Description Default now \u6307\u5b9a\u7684\u65e5\u671f\uff0cdate\u5bf9\u8c61 required Returns: Type Description datetime.date \u4e0a\u4e00\u4e2a\u4ea4\u6613\u65e5 Source code in omicron/models/timeframe.py @classmethod def get_previous_trade_day ( cls , now : datetime . date ): \"\"\"\u83b7\u53d6\u4e0a\u4e00\u4e2a\u4ea4\u6613\u65e5 \u5982\u679c\u5f53\u5929\u662f\u5468\u516d\u6216\u8005\u5468\u65e5\uff0c\u8fd4\u56de\u5468\u4e94\uff08\u4ea4\u6613\u65e5\uff09\uff0c\u5982\u679c\u5f53\u5929\u662f\u5468\u4e00\uff0c\u8fd4\u56de\u5468\u4e94\uff0c\u5982\u679c\u5f53\u5929\u662f\u5468\u4e94\uff0c\u8fd4\u56de\u5468\u56db Args: now : \u6307\u5b9a\u7684\u65e5\u671f\uff0cdate\u5bf9\u8c61 Returns: datetime.date: \u4e0a\u4e00\u4e2a\u4ea4\u6613\u65e5 \"\"\" if now == datetime . date ( 2005 , 1 , 4 ): return now if TimeFrame . is_trade_day ( now ): pre_trade_day = TimeFrame . day_shift ( now , - 1 ) else : pre_trade_day = TimeFrame . 
day_shift ( now , 0 ) return pre_trade_day","title":"get_previous_trade_day()"},{"location":"api/timeframe/#omicron.models.timeframe.TimeFrame.get_ticks","text":"\u53d6\u6708\u7ebf\u3001\u5468\u7ebf\u3001\u65e5\u7ebf\u53ca\u5404\u5206\u949f\u7ebf\u5bf9\u5e94\u7684frame \u5bf9\u5206\u949f\u7ebf\uff0c\u8fd4\u56de\u503c\u4ec5\u5305\u542b\u65f6\u95f4\uff0c\u4e0d\u5305\u542b\u65e5\u671f\uff08\u5747\u4e3a\u6574\u6570\u8868\u793a\uff09 Examples: >>> TimeFrame . month_frames = np . array ([ 20050131 , 20050228 , 20050331 ]) >>> TimeFrame . get_ticks ( FrameType . MONTH )[: 3 ] array ([ 20050131 , 20050228 , 20050331 ]) Parameters: Name Type Description Default frame_type [description] required Exceptions: Type Description ValueError [description] Returns: Type Description Union[List, np.array] \u6708\u7ebf\u3001\u5468\u7ebf\u3001\u65e5\u7ebf\u53ca\u5404\u5206\u949f\u7ebf\u5bf9\u5e94\u7684frame Source code in omicron/models/timeframe.py @classmethod def get_ticks ( cls , frame_type : FrameType ) -> Union [ List , np . array ]: \"\"\"\u53d6\u6708\u7ebf\u3001\u5468\u7ebf\u3001\u65e5\u7ebf\u53ca\u5404\u5206\u949f\u7ebf\u5bf9\u5e94\u7684frame \u5bf9\u5206\u949f\u7ebf\uff0c\u8fd4\u56de\u503c\u4ec5\u5305\u542b\u65f6\u95f4\uff0c\u4e0d\u5305\u542b\u65e5\u671f\uff08\u5747\u4e3a\u6574\u6570\u8868\u793a\uff09 Examples: >>> TimeFrame.month_frames = np.array([20050131, 20050228, 20050331]) >>> TimeFrame.get_ticks(FrameType.MONTH)[:3] array([20050131, 20050228, 20050331]) Args: frame_type : [description] Raises: ValueError: [description] Returns: \u6708\u7ebf\u3001\u5468\u7ebf\u3001\u65e5\u7ebf\u53ca\u5404\u5206\u949f\u7ebf\u5bf9\u5e94\u7684frame \"\"\" if frame_type in cls . minute_level_frames : return cls . ticks [ frame_type ] if frame_type == FrameType . DAY : return cls . day_frames elif frame_type == FrameType . WEEK : return cls . week_frames elif frame_type == FrameType . MONTH : return cls . month_frames else : # pragma: no cover raise ValueError ( f \" { frame_type } not supported!\" )","title":"get_ticks()"},{"location":"api/timeframe/#omicron.models.timeframe.TimeFrame.init","text":"\u521d\u59cb\u5316\u65e5\u5386 Source code in omicron/models/timeframe.py @classmethod async def init ( cls ): \"\"\"\u521d\u59cb\u5316\u65e5\u5386\"\"\" await cls . _load_calendar ()","title":"init()"},{"location":"api/timeframe/#omicron.models.timeframe.TimeFrame.int2date","text":"\u5c06\u6570\u5b57\u8868\u793a\u7684\u65e5\u671f\u8f6c\u6362\u6210\u4e3a\u65e5\u671f\u683c\u5f0f Examples: >>> TimeFrame . int2date ( 20200501 ) datetime . date ( 2020 , 5 , 1 ) Parameters: Name Type Description Default d Union[int, str] YYYYMMDD\u8868\u793a\u7684\u65e5\u671f required Returns: Type Description datetime.date \u8f6c\u6362\u540e\u7684\u65e5\u671f Source code in omicron/models/timeframe.py @classmethod def int2date ( cls , d : Union [ int , str ]) -> datetime . date : \"\"\"\u5c06\u6570\u5b57\u8868\u793a\u7684\u65e5\u671f\u8f6c\u6362\u6210\u4e3a\u65e5\u671f\u683c\u5f0f Examples: >>> TimeFrame.int2date(20200501) datetime.date(2020, 5, 1) Args: d: YYYYMMDD\u8868\u793a\u7684\u65e5\u671f Returns: \u8f6c\u6362\u540e\u7684\u65e5\u671f \"\"\" s = str ( d ) # it's 8 times faster than arrow.get return datetime . date ( int ( s [: 4 ]), int ( s [ 4 : 6 ]), int ( s [ 6 :]))","title":"int2date()"},{"location":"api/timeframe/#omicron.models.timeframe.TimeFrame.int2time","text":"\u5c06\u6574\u6570\u8868\u793a\u7684\u65f6\u95f4\u8f6c\u6362\u4e3a datetime \u7c7b\u578b\u8868\u793a Examples: >>> TimeFrame . int2time ( 202005011500 ) datetime . 
datetime ( 2020 , 5 , 1 , 15 , 0 ) Parameters: Name Type Description Default tm int time in YYYYMMDDHHmm format required Returns: Type Description datetime.datetime \u8f6c\u6362\u540e\u7684\u65f6\u95f4 Source code in omicron/models/timeframe.py @classmethod def int2time ( cls , tm : int ) -> datetime . datetime : \"\"\"\u5c06\u6574\u6570\u8868\u793a\u7684\u65f6\u95f4\u8f6c\u6362\u4e3a`datetime`\u7c7b\u578b\u8868\u793a examples: >>> TimeFrame.int2time(202005011500) datetime.datetime(2020, 5, 1, 15, 0) Args: tm: time in YYYYMMDDHHmm format Returns: \u8f6c\u6362\u540e\u7684\u65f6\u95f4 \"\"\" s = str ( tm ) # its 8 times faster than arrow.get() return datetime . datetime ( int ( s [: 4 ]), int ( s [ 4 : 6 ]), int ( s [ 6 : 8 ]), int ( s [ 8 : 10 ]), int ( s [ 10 : 12 ]) )","title":"int2time()"},{"location":"api/timeframe/#omicron.models.timeframe.TimeFrame.is_bar_closed","text":"\u5224\u65ad frame \u6240\u4ee3\u8868\u7684bar\u662f\u5426\u5df2\u7ecf\u6536\u76d8\uff08\u7ed3\u675f\uff09 \u5982\u679c\u662f\u65e5\u7ebf\uff0cframe\u4e0d\u4e3a\u5f53\u5929\uff0c\u5219\u8ba4\u4e3a\u5df2\u6536\u76d8\uff1b\u6216\u8005\u5f53\u524d\u65f6\u95f4\u5728\u6536\u76d8\u65f6\u95f4\u4e4b\u540e\uff0c\u4e5f\u8ba4\u4e3a\u5df2\u6536\u76d8\u3002 \u5982\u679c\u662f\u5176\u5b83\u5468\u671f\uff0c\u5219\u53ea\u6709\u5f53frame\u6b63\u597d\u5728\u8fb9\u754c\u4e0a\uff0c\u624d\u8ba4\u4e3a\u662f\u5df2\u6536\u76d8\u3002\u8fd9\u91cc\u6709\u4e00\u4e2a\u5047\u8bbe\uff1a\u6211\u4eec\u4e0d\u4f1a\u5728\u5176\u5b83\u5468\u671f\u4e0a\uff0c\u5224\u65ad\u672a\u6765\u7684\u67d0\u4e2aframe\u662f\u5426\u5df2\u7ecf\u6536\u76d8\u3002 Parameters: Name Type Description Default frame bar\u6240\u5904\u7684\u65f6\u95f4\uff0c\u5fc5\u987b\u5c0f\u4e8e\u5f53\u524d\u65f6\u95f4 required ft FrameType bar\u6240\u4ee3\u8868\u7684\u5e27\u7c7b\u578b required Returns: Type Description bool \u662f\u5426\u5df2\u7ecf\u6536\u76d8 Source code in omicron/models/timeframe.py @classmethod def is_bar_closed ( cls , frame : Frame , ft : FrameType ) -> bool : \"\"\"\u5224\u65ad`frame`\u6240\u4ee3\u8868\u7684bar\u662f\u5426\u5df2\u7ecf\u6536\u76d8\uff08\u7ed3\u675f\uff09 \u5982\u679c\u662f\u65e5\u7ebf\uff0cframe\u4e0d\u4e3a\u5f53\u5929\uff0c\u5219\u8ba4\u4e3a\u5df2\u6536\u76d8\uff1b\u6216\u8005\u5f53\u524d\u65f6\u95f4\u5728\u6536\u76d8\u65f6\u95f4\u4e4b\u540e\uff0c\u4e5f\u8ba4\u4e3a\u5df2\u6536\u76d8\u3002 \u5982\u679c\u662f\u5176\u5b83\u5468\u671f\uff0c\u5219\u53ea\u6709\u5f53frame\u6b63\u597d\u5728\u8fb9\u754c\u4e0a\uff0c\u624d\u8ba4\u4e3a\u662f\u5df2\u6536\u76d8\u3002\u8fd9\u91cc\u6709\u4e00\u4e2a\u5047\u8bbe\uff1a\u6211\u4eec\u4e0d\u4f1a\u5728\u5176\u5b83\u5468\u671f\u4e0a\uff0c\u5224\u65ad\u672a\u6765\u7684\u67d0\u4e2aframe\u662f\u5426\u5df2\u7ecf\u6536\u76d8\u3002 Args: frame : bar\u6240\u5904\u7684\u65f6\u95f4\uff0c\u5fc5\u987b\u5c0f\u4e8e\u5f53\u524d\u65f6\u95f4 ft: bar\u6240\u4ee3\u8868\u7684\u5e27\u7c7b\u578b Returns: bool: \u662f\u5426\u5df2\u7ecf\u6536\u76d8 \"\"\" floor = cls . floor ( frame , ft ) now = arrow . now () if ft == FrameType . DAY : return floor < now . date () or now . 
hour >= 15 else : return floor == frame","title":"is_bar_closed()"},{"location":"api/timeframe/#omicron.models.timeframe.TimeFrame.is_closing_call_auction_time","text":"\u5224\u65ad tm \u6307\u5b9a\u7684\u65f6\u95f4\u662f\u5426\u4e3a\u6536\u76d8\u96c6\u5408\u7ade\u4ef7\u65f6\u95f4 Fixme \u6b64\u5904\u5b9e\u73b0\u6709\u8bef\uff0c\u6536\u76d8\u96c6\u5408\u7ade\u4ef7\u65f6\u95f4\u5e94\u8be5\u8fd8\u5305\u542b\u4e0a\u5348\u6536\u76d8\u65f6\u95f4 Parameters: Name Type Description Default tm [description]. Defaults to None. None Returns: Type Description bool bool Source code in omicron/models/timeframe.py @classmethod def is_closing_call_auction_time ( cls , tm : Union [ datetime . datetime , Arrow ] = None ) -> bool : \"\"\"\u5224\u65ad`tm`\u6307\u5b9a\u7684\u65f6\u95f4\u662f\u5426\u4e3a\u6536\u76d8\u96c6\u5408\u7ade\u4ef7\u65f6\u95f4 Fixme: \u6b64\u5904\u5b9e\u73b0\u6709\u8bef\uff0c\u6536\u76d8\u96c6\u5408\u7ade\u4ef7\u65f6\u95f4\u5e94\u8be5\u8fd8\u5305\u542b\u4e0a\u5348\u6536\u76d8\u65f6\u95f4 Args: tm : [description]. Defaults to None. Returns: bool \"\"\" tm = tm or cls . now () if not cls . is_trade_day ( tm ): return False minutes = tm . hour * 60 + tm . minute return 15 * 60 - 3 <= minutes < 15 * 60","title":"is_closing_call_auction_time()"},{"location":"api/timeframe/#omicron.models.timeframe.TimeFrame.is_open_time","text":"\u5224\u65ad tm \u6307\u5b9a\u7684\u65f6\u95f4\u662f\u5426\u5904\u5728\u4ea4\u6613\u65f6\u95f4\u6bb5\u3002 \u4ea4\u6613\u65f6\u95f4\u6bb5\u662f\u6307\u96c6\u5408\u7ade\u4ef7\u65f6\u95f4\u6bb5\u4e4b\u5916\u7684\u5f00\u76d8\u65f6\u95f4 Examples: >>> TimeFrame . day_frames = np . array ([ 20200102 , 20200103 , 20200106 , 20200107 , 20200108 ]) >>> TimeFrame . is_open_time ( arrow . get ( '2020-1-1 14:59' ) . naive ) False >>> TimeFrame . is_open_time ( arrow . get ( '2020-1-3 14:59' ) . naive ) True Parameters: Name Type Description Default tm [description]. Defaults to None. None Returns: Type Description bool bool Source code in omicron/models/timeframe.py @classmethod def is_open_time ( cls , tm : Union [ datetime . datetime , Arrow ] = None ) -> bool : \"\"\"\u5224\u65ad`tm`\u6307\u5b9a\u7684\u65f6\u95f4\u662f\u5426\u5904\u5728\u4ea4\u6613\u65f6\u95f4\u6bb5\u3002 \u4ea4\u6613\u65f6\u95f4\u6bb5\u662f\u6307\u96c6\u5408\u7ade\u4ef7\u65f6\u95f4\u6bb5\u4e4b\u5916\u7684\u5f00\u76d8\u65f6\u95f4 Examples: >>> TimeFrame.day_frames = np.array([20200102, 20200103, 20200106, 20200107, 20200108]) >>> TimeFrame.is_open_time(arrow.get('2020-1-1 14:59').naive) False >>> TimeFrame.is_open_time(arrow.get('2020-1-3 14:59').naive) True Args: tm : [description]. Defaults to None. Returns: bool \"\"\" tm = tm or arrow . now () if not cls . is_trade_day ( tm ): return False tick = tm . hour * 60 + tm . minute return tick in cls . ticks [ FrameType . MIN1 ]","title":"is_open_time()"},{"location":"api/timeframe/#omicron.models.timeframe.TimeFrame.is_opening_call_auction_time","text":"\u5224\u65ad tm \u6307\u5b9a\u7684\u65f6\u95f4\u662f\u5426\u4e3a\u5f00\u76d8\u96c6\u5408\u7ade\u4ef7\u65f6\u95f4 Parameters: Name Type Description Default tm [description]. Defaults to None. None Returns: Type Description bool bool Source code in omicron/models/timeframe.py @classmethod def is_opening_call_auction_time ( cls , tm : Union [ Arrow , datetime . datetime ] = None ) -> bool : \"\"\"\u5224\u65ad`tm`\u6307\u5b9a\u7684\u65f6\u95f4\u662f\u5426\u4e3a\u5f00\u76d8\u96c6\u5408\u7ade\u4ef7\u65f6\u95f4 Args: tm : [description]. Defaults to None. Returns: bool \"\"\" if tm is None : tm = cls . 
now () if not cls . is_trade_day ( tm ): return False minutes = tm . hour * 60 + tm . minute return 9 * 60 + 15 < minutes <= 9 * 60 + 25","title":"is_opening_call_auction_time()"},{"location":"api/timeframe/#omicron.models.timeframe.TimeFrame.is_trade_day","text":"\u5224\u65ad dt \u662f\u5426\u4e3a\u4ea4\u6613\u65e5 Examples: >>> TimeFrame . is_trade_day ( arrow . get ( '2020-1-1' )) False Parameters: Name Type Description Default dt required Returns: Type Description bool bool Source code in omicron/models/timeframe.py @classmethod def is_trade_day ( cls , dt : Union [ datetime . date , datetime . datetime , Arrow ]) -> bool : \"\"\"\u5224\u65ad`dt`\u662f\u5426\u4e3a\u4ea4\u6613\u65e5 Examples: >>> TimeFrame.is_trade_day(arrow.get('2020-1-1')) False Args: dt : Returns: bool \"\"\" return cls . date2int ( dt ) in cls . day_frames","title":"is_trade_day()"},{"location":"api/timeframe/#omicron.models.timeframe.TimeFrame.last_min_frame","text":"\u83b7\u53d6 day \u65e5\u5468\u671f\u4e3a frame_type \u7684\u7ed3\u675fframe\u3002 Examples: >>> TimeFrame . last_min_frame ( arrow . get ( '2020-1-5' ) . date (), FrameType . MIN30 ) datetime . datetime ( 2020 , 1 , 3 , 15 , 0 ) Parameters: Name Type Description Default day Union[str, Arrow, datetime.date] required frame_type FrameType required Returns: Type Description Union[datetime.date, datetime.datetime] day \u65e5\u5468\u671f\u4e3a frame_type \u7684\u7ed3\u675fframe Source code in omicron/models/timeframe.py @classmethod def last_min_frame ( cls , day : Union [ str , Arrow , datetime . date ], frame_type : FrameType ) -> Union [ datetime . date , datetime . datetime ]: \"\"\"\u83b7\u53d6`day`\u65e5\u5468\u671f\u4e3a`frame_type`\u7684\u7ed3\u675fframe\u3002 Example: >>> TimeFrame.last_min_frame(arrow.get('2020-1-5').date(), FrameType.MIN30) datetime.datetime(2020, 1, 3, 15, 0) Args: day: frame_type: Returns: `day`\u65e5\u5468\u671f\u4e3a`frame_type`\u7684\u7ed3\u675fframe \"\"\" if isinstance ( day , str ): day = cls . date2int ( arrow . get ( day ) . date ()) elif isinstance ( day , arrow . Arrow ) or isinstance ( day , datetime . datetime ): day = cls . date2int ( day . date ()) elif isinstance ( day , datetime . date ): day = cls . date2int ( day ) else : raise TypeError ( f \" { type ( day ) } is not supported.\" ) if frame_type in cls . minute_level_frames : last_close_day = cls . day_frames [ cls . day_frames <= day ][ - 1 ] day = cls . int2date ( last_close_day ) return datetime . datetime ( day . year , day . month , day . day , hour = 15 , minute = 0 ) else : # pragma: no cover raise ValueError ( f \" { frame_type } not supported\" )","title":"last_min_frame()"},{"location":"api/timeframe/#omicron.models.timeframe.TimeFrame.minute_frames_floor","text":"\u5bf9\u4e8e\u5206\u949f\u7ea7\u7684frame,\u8fd4\u56de\u5b83\u4eec\u4e0eframe\u523b\u5ea6\u5411\u4e0b\u5bf9\u9f50\u540e\u7684frame\u53ca\u65e5\u671f\u8fdb\u4f4d\u3002\u5982\u679c\u9700\u8981\u5bf9\u9f50\u5230\u4e0a\u4e00\u4e2a\u4ea4\u6613 \u65e5\uff0c\u5219\u8fdb\u4f4d\u4e3a-1\uff0c\u5426\u5219\u4e3a0. Examples: >>> ticks = [ 600 , 630 , 660 , 690 , 810 , 840 , 870 , 900 ] >>> TimeFrame . minute_frames_floor ( ticks , 545 ) ( 900 , - 1 ) >>> TimeFrame . minute_frames_floor ( ticks , 600 ) ( 600 , 0 ) >>> TimeFrame . minute_frames_floor ( ticks , 605 ) ( 600 , 0 ) >>> TimeFrame . minute_frames_floor ( ticks , 899 ) ( 870 , 0 ) >>> TimeFrame . minute_frames_floor ( ticks , 900 ) ( 900 , 0 ) >>> TimeFrame . 
minute_frames_floor ( ticks , 905 ) ( 900 , 0 ) Parameters: Name Type Description Default ticks np.array or list frames\u523b\u5ea6 required moment int \u6574\u6570\u8868\u793a\u7684\u5206\u949f\u6570\uff0c\u6bd4\u5982900\u8868\u793a15\uff1a00 required Returns: Type Description Tuple[int, int] tuple, the first is the new moment, the second is carry-on Source code in omicron/models/timeframe.py @classmethod def minute_frames_floor ( cls , ticks , moment ) -> Tuple [ int , int ]: \"\"\" \u5bf9\u4e8e\u5206\u949f\u7ea7\u7684frame,\u8fd4\u56de\u5b83\u4eec\u4e0eframe\u523b\u5ea6\u5411\u4e0b\u5bf9\u9f50\u540e\u7684frame\u53ca\u65e5\u671f\u8fdb\u4f4d\u3002\u5982\u679c\u9700\u8981\u5bf9\u9f50\u5230\u4e0a\u4e00\u4e2a\u4ea4\u6613 \u65e5\uff0c\u5219\u8fdb\u4f4d\u4e3a-1\uff0c\u5426\u5219\u4e3a0. Examples: >>> ticks = [600, 630, 660, 690, 810, 840, 870, 900] >>> TimeFrame.minute_frames_floor(ticks, 545) (900, -1) >>> TimeFrame.minute_frames_floor(ticks, 600) (600, 0) >>> TimeFrame.minute_frames_floor(ticks, 605) (600, 0) >>> TimeFrame.minute_frames_floor(ticks, 899) (870, 0) >>> TimeFrame.minute_frames_floor(ticks, 900) (900, 0) >>> TimeFrame.minute_frames_floor(ticks, 905) (900, 0) Args: ticks (np.array or list): frames\u523b\u5ea6 moment (int): \u6574\u6570\u8868\u793a\u7684\u5206\u949f\u6570\uff0c\u6bd4\u5982900\u8868\u793a15\uff1a00 Returns: tuple, the first is the new moment, the second is carry-on \"\"\" if moment < ticks [ 0 ]: return ticks [ - 1 ], - 1 # \u2019right' \u76f8\u5f53\u4e8e ticks <= m index = np . searchsorted ( ticks , moment , side = \"right\" ) return ticks [ index - 1 ], 0","title":"minute_frames_floor()"},{"location":"api/timeframe/#omicron.models.timeframe.TimeFrame.month_shift","text":"\u6c42 start \u6240\u5728\u7684\u6708\u79fb\u4f4d\u540e\u7684frame \u672c\u51fd\u6570\u9996\u5148\u5c06 start \u5bf9\u9f50\uff0c\u7136\u540e\u8fdb\u884c\u79fb\u4f4d\u3002 Examples: >>> TimeFrame . month_frames = np . array ([ 20150130 , 20150227 , 20150331 , 20150430 ]) >>> TimeFrame . month_shift ( arrow . get ( '2015-2-26' ) . date (), 0 ) datetime . date ( 2015 , 1 , 30 ) >>> TimeFrame . month_shift ( arrow . get ( '2015-2-27' ) . date (), 0 ) datetime . date ( 2015 , 2 , 27 ) >>> TimeFrame . month_shift ( arrow . get ( '2015-3-1' ) . date (), 0 ) datetime . date ( 2015 , 2 , 27 ) >>> TimeFrame . month_shift ( arrow . get ( '2015-3-1' ) . date (), 1 ) datetime . date ( 2015 , 3 , 31 ) Returns: Type Description datetime.date \u79fb\u4f4d\u540e\u7684\u65e5\u671f Source code in omicron/models/timeframe.py @classmethod def month_shift ( cls , start : datetime . date , offset : int ) -> datetime . date : \"\"\"\u6c42`start`\u6240\u5728\u7684\u6708\u79fb\u4f4d\u540e\u7684frame \u672c\u51fd\u6570\u9996\u5148\u5c06`start`\u5bf9\u9f50\uff0c\u7136\u540e\u8fdb\u884c\u79fb\u4f4d\u3002 Examples: >>> TimeFrame.month_frames = np.array([20150130, 20150227, 20150331, 20150430]) >>> TimeFrame.month_shift(arrow.get('2015-2-26').date(), 0) datetime.date(2015, 1, 30) >>> TimeFrame.month_shift(arrow.get('2015-2-27').date(), 0) datetime.date(2015, 2, 27) >>> TimeFrame.month_shift(arrow.get('2015-3-1').date(), 0) datetime.date(2015, 2, 27) >>> TimeFrame.month_shift(arrow.get('2015-3-1').date(), 1) datetime.date(2015, 3, 31) Returns: \u79fb\u4f4d\u540e\u7684\u65e5\u671f \"\"\" start = cls . date2int ( start ) return cls . int2date ( ext . shift ( cls . 
month_frames , start , offset ))","title":"month_shift()"},{"location":"api/timeframe/#omicron.models.timeframe.TimeFrame.replace_date","text":"\u5c06 dtm \u53d8\u91cf\u7684\u65e5\u671f\u66f4\u6362\u4e3a dt \u6307\u5b9a\u7684\u65e5\u671f Examples: >>> TimeFrame . replace_date ( arrow . get ( '2020-1-1 13:49' ) . datetime , datetime . date ( 2019 , 1 , 1 )) datetime . datetime ( 2019 , 1 , 1 , 13 , 49 ) Parameters: Name Type Description Default dtm datetime.datetime [description] required dt datetime.date [description] required Returns: Type Description datetime.datetime \u53d8\u6362\u540e\u7684\u65f6\u95f4 Source code in omicron/models/timeframe.py @classmethod def replace_date ( cls , dtm : datetime . datetime , dt : datetime . date ) -> datetime . datetime : \"\"\"\u5c06`dtm`\u53d8\u91cf\u7684\u65e5\u671f\u66f4\u6362\u4e3a`dt`\u6307\u5b9a\u7684\u65e5\u671f Example: >>> TimeFrame.replace_date(arrow.get('2020-1-1 13:49').datetime, datetime.date(2019, 1,1)) datetime.datetime(2019, 1, 1, 13, 49) Args: dtm (datetime.datetime): [description] dt (datetime.date): [description] Returns: \u53d8\u6362\u540e\u7684\u65f6\u95f4 \"\"\" return datetime . datetime ( dt . year , dt . month , dt . day , dtm . hour , dtm . minute , dtm . second , dtm . microsecond )","title":"replace_date()"},{"location":"api/timeframe/#omicron.models.timeframe.TimeFrame.resample_frames","text":"\u5c06\u4ece\u884c\u60c5\u670d\u52a1\u5668\u83b7\u53d6\u7684\u4ea4\u6613\u65e5\u5386\u91cd\u91c7\u6837\uff0c\u751f\u6210\u5468\u5e27\u548c\u6708\u7ebf\u5e27 Parameters: Name Type Description Default trade_days Iterable [description] required frame_type FrameType [description] required Returns: Type Description List[int] \u91cd\u91c7\u6837\u540e\u7684\u65e5\u671f\u5217\u8868\uff0c\u65e5\u671f\u7528\u6574\u6570\u8868\u793a Source code in omicron/models/timeframe.py @classmethod def resample_frames ( cls , trade_days : Iterable [ datetime . date ], frame_type : FrameType ) -> List [ int ]: \"\"\"\u5c06\u4ece\u884c\u60c5\u670d\u52a1\u5668\u83b7\u53d6\u7684\u4ea4\u6613\u65e5\u5386\u91cd\u91c7\u6837\uff0c\u751f\u6210\u5468\u5e27\u548c\u6708\u7ebf\u5e27 Args: trade_days (Iterable): [description] frame_type (FrameType): [description] Returns: List[int]: \u91cd\u91c7\u6837\u540e\u7684\u65e5\u671f\u5217\u8868\uff0c\u65e5\u671f\u7528\u6574\u6570\u8868\u793a \"\"\" if frame_type == FrameType . WEEK : weeks = [] last = trade_days [ 0 ] for cur in trade_days : if cur . weekday () < last . weekday () or ( cur - last ) . days >= 7 : weeks . append ( last ) last = cur if weeks [ - 1 ] < last : weeks . append ( last ) return weeks elif frame_type == FrameType . MONTH : months = [] last = trade_days [ 0 ] for cur in trade_days : if cur . day < last . day : months . append ( last ) last = cur months . append ( last ) return months elif frame_type == FrameType . QUARTER : quarters = [] last = trade_days [ 0 ] for cur in trade_days : if last . month % 3 == 0 : if cur . month > last . month or cur . year > last . year : quarters . append ( last ) last = cur quarters . append ( last ) return quarters elif frame_type == FrameType . YEAR : years = [] last = trade_days [ 0 ] for cur in trade_days : if cur . year > last . year : years . append ( last ) last = cur years . 
append ( last ) return years else : # pragma: no cover raise ValueError ( f \"Unsupported FrameType: { frame_type } \" )","title":"resample_frames()"},{"location":"api/timeframe/#omicron.models.timeframe.TimeFrame.service_degrade","text":"\u5f53cache\u4e2d\u4e0d\u5b58\u5728\u65e5\u5386\u65f6\uff0c\u542f\u7528\u968fomicron\u7248\u672c\u4e00\u8d77\u53d1\u884c\u65f6\u81ea\u5e26\u7684\u65e5\u5386\u3002 \u6ce8\u610f\uff1a\u968fomicron\u7248\u672c\u4e00\u8d77\u53d1\u884c\u65f6\u81ea\u5e26\u7684\u65e5\u5386\u5f88\u53ef\u80fd\u4e0d\u662f\u6700\u65b0\u7684\uff0c\u5e76\u4e14\u53ef\u80fd\u5305\u542b\u9519\u8bef\u3002\u6bd4\u5982\uff0c\u5b58\u5728\u8fd9\u6837\u7684\u60c5\u51b5\uff0c\u5728\u672c\u7248\u672c\u7684omicron\u53d1\u884c\u65f6\uff0c\u65e5\u5386\u66f4\u65b0\u5230\u4e862021\u5e7412\u670831\u65e5\uff0c\u5728\u8fd9\u4e4b\u524d\u7684\u65e5\u5386\u90fd\u662f\u51c6\u786e\u7684\uff0c\u4f46\u5728\u6b64\u4e4b\u540e\u7684\u65e5\u5386\uff0c\u5219\u6709\u53ef\u80fd\u51fa\u73b0\u9519\u8bef\u3002\u56e0\u6b64\uff0c\u53ea\u5e94\u8be5\u5728\u7279\u6b8a\u7684\u60c5\u51b5\u4e0b\uff08\u6bd4\u5982\u6d4b\u8bd5\uff09\u8c03\u7528\u6b64\u65b9\u6cd5\uff0c\u4ee5\u83b7\u5f97\u4e00\u4e2a\u964d\u7ea7\u7684\u670d\u52a1\u3002 Source code in omicron/models/timeframe.py @classmethod def service_degrade ( cls ): \"\"\"\u5f53cache\u4e2d\u4e0d\u5b58\u5728\u65e5\u5386\u65f6\uff0c\u542f\u7528\u968fomicron\u7248\u672c\u4e00\u8d77\u53d1\u884c\u65f6\u81ea\u5e26\u7684\u65e5\u5386\u3002 \u6ce8\u610f\uff1a\u968fomicron\u7248\u672c\u4e00\u8d77\u53d1\u884c\u65f6\u81ea\u5e26\u7684\u65e5\u5386\u5f88\u53ef\u80fd\u4e0d\u662f\u6700\u65b0\u7684\uff0c\u5e76\u4e14\u53ef\u80fd\u5305\u542b\u9519\u8bef\u3002\u6bd4\u5982\uff0c\u5b58\u5728\u8fd9\u6837\u7684\u60c5\u51b5\uff0c\u5728\u672c\u7248\u672c\u7684omicron\u53d1\u884c\u65f6\uff0c\u65e5\u5386\u66f4\u65b0\u5230\u4e862021\u5e7412\u670831\u65e5\uff0c\u5728\u8fd9\u4e4b\u524d\u7684\u65e5\u5386\u90fd\u662f\u51c6\u786e\u7684\uff0c\u4f46\u5728\u6b64\u4e4b\u540e\u7684\u65e5\u5386\uff0c\u5219\u6709\u53ef\u80fd\u51fa\u73b0\u9519\u8bef\u3002\u56e0\u6b64\uff0c\u53ea\u5e94\u8be5\u5728\u7279\u6b8a\u7684\u60c5\u51b5\u4e0b\uff08\u6bd4\u5982\u6d4b\u8bd5\uff09\u8c03\u7528\u6b64\u65b9\u6cd5\uff0c\u4ee5\u83b7\u5f97\u4e00\u4e2a\u964d\u7ea7\u7684\u670d\u52a1\u3002 \"\"\" _dir = os . path . dirname ( __file__ ) file = os . path . join ( _dir , \"..\" , \"config\" , \"calendar.json\" ) with open ( file , \"r\" ) as f : data = json . load ( f ) for k , v in data . items (): setattr ( cls , k , np . array ( v ))","title":"service_degrade()"},{"location":"api/timeframe/#omicron.models.timeframe.TimeFrame.shift","text":"\u5c06\u6307\u5b9a\u7684moment\u79fb\u52a8N\u4e2a frame_type \u4f4d\u7f6e\u3002 \u5f53N\u4e3a\u8d1f\u6570\u65f6\uff0c\u610f\u5473\u7740\u5411\u524d\u79fb\u52a8\uff1b\u5f53N\u4e3a\u6b63\u6570\u65f6\uff0c\u610f\u5473\u7740\u5411\u540e\u79fb\u52a8\u3002\u5982\u679cn\u4e3a\u96f6\uff0c\u610f\u5473\u7740\u79fb\u52a8\u5230\u6700\u63a5\u8fd1 \u7684\u4e00\u4e2a\u5df2\u7ed3\u675f\u7684frame\u3002 \u5982\u679cmoment\u6ca1\u6709\u5bf9\u9f50\u5230frame_type\u5bf9\u5e94\u7684\u65f6\u95f4\uff0c\u5c06\u9996\u5148\u8fdb\u884c\u5bf9\u9f50\u3002 See also: day_shift week_shift month_shift Examples: >>> TimeFrame . shift ( datetime . date ( 2020 , 1 , 3 ), 1 , FrameType . DAY ) datetime . date ( 2020 , 1 , 6 ) >>> TimeFrame . shift ( datetime . datetime ( 2020 , 1 , 6 , 11 ), 1 , FrameType . MIN30 ) datetime . 
datetime ( 2020 , 1 , 6 , 11 , 30 ) Parameters: Name Type Description Default moment Union[Arrow, datetime.date, datetime.datetime] required n int required frame_type FrameType required Returns: Type Description Union[datetime.date, datetime.datetime] \u79fb\u4f4d\u540e\u7684Frame Source code in omicron/models/timeframe.py @classmethod def shift ( cls , moment : Union [ Arrow , datetime . date , datetime . datetime ], n : int , frame_type : FrameType , ) -> Union [ datetime . date , datetime . datetime ]: \"\"\"\u5c06\u6307\u5b9a\u7684moment\u79fb\u52a8N\u4e2a`frame_type`\u4f4d\u7f6e\u3002 \u5f53N\u4e3a\u8d1f\u6570\u65f6\uff0c\u610f\u5473\u7740\u5411\u524d\u79fb\u52a8\uff1b\u5f53N\u4e3a\u6b63\u6570\u65f6\uff0c\u610f\u5473\u7740\u5411\u540e\u79fb\u52a8\u3002\u5982\u679cn\u4e3a\u96f6\uff0c\u610f\u5473\u7740\u79fb\u52a8\u5230\u6700\u63a5\u8fd1 \u7684\u4e00\u4e2a\u5df2\u7ed3\u675f\u7684frame\u3002 \u5982\u679cmoment\u6ca1\u6709\u5bf9\u9f50\u5230frame_type\u5bf9\u5e94\u7684\u65f6\u95f4\uff0c\u5c06\u9996\u5148\u8fdb\u884c\u5bf9\u9f50\u3002 See also: - [day_shift][omicron.models.timeframe.TimeFrame.day_shift] - [week_shift][omicron.models.timeframe.TimeFrame.week_shift] - [month_shift][omicron.models.timeframe.TimeFrame.month_shift] Examples: >>> TimeFrame.shift(datetime.date(2020, 1, 3), 1, FrameType.DAY) datetime.date(2020, 1, 6) >>> TimeFrame.shift(datetime.datetime(2020, 1, 6, 11), 1, FrameType.MIN30) datetime.datetime(2020, 1, 6, 11, 30) Args: moment: n: frame_type: Returns: \u79fb\u4f4d\u540e\u7684Frame \"\"\" if frame_type == FrameType . DAY : return cls . day_shift ( moment , n ) elif frame_type == FrameType . WEEK : return cls . week_shift ( moment , n ) elif frame_type == FrameType . MONTH : return cls . month_shift ( moment , n ) elif frame_type in [ FrameType . MIN1 , FrameType . MIN5 , FrameType . MIN15 , FrameType . MIN30 , FrameType . MIN60 , ]: tm = moment . hour * 60 + moment . minute new_tick_pos = cls . ticks [ frame_type ] . index ( tm ) + n days = new_tick_pos // len ( cls . ticks [ frame_type ]) min_part = new_tick_pos % len ( cls . ticks [ frame_type ]) date_part = cls . day_shift ( moment . date (), days ) minutes = cls . ticks [ frame_type ][ min_part ] h , m = minutes // 60 , minutes % 60 return datetime . datetime ( date_part . year , date_part . month , date_part . day , h , m , tzinfo = moment . tzinfo , ) else : # pragma: no cover raise ValueError ( f \" { frame_type } is not supported.\" )","title":"shift()"},{"location":"api/timeframe/#omicron.models.timeframe.TimeFrame.time2int","text":"\u5c06\u65f6\u95f4\u7c7b\u578b\u8f6c\u6362\u4e3a\u6574\u6570\u7c7b\u578b tm\u53ef\u4ee5\u662fArrow\u7c7b\u578b\uff0c\u4e5f\u53ef\u4ee5\u662fdatetime.datetime\u6216\u8005\u4efb\u4f55\u5176\u5b83\u7c7b\u578b\uff0c\u53ea\u8981\u5b83\u6709year,month...\u7b49 \u5c5e\u6027 Examples: >>> TimeFrame . time2int ( datetime . datetime ( 2020 , 5 , 1 , 15 )) 202005011500 Parameters: Name Type Description Default tm Union[datetime.datetime, Arrow] required Returns: Type Description int \u8f6c\u6362\u540e\u7684\u6574\u6570\uff0c\u6bd4\u59822020050115 Source code in omicron/models/timeframe.py @classmethod def time2int ( cls , tm : Union [ datetime . 
datetime , Arrow ]) -> int : \"\"\"\u5c06\u65f6\u95f4\u7c7b\u578b\u8f6c\u6362\u4e3a\u6574\u6570\u7c7b\u578b tm\u53ef\u4ee5\u662fArrow\u7c7b\u578b\uff0c\u4e5f\u53ef\u4ee5\u662fdatetime.datetime\u6216\u8005\u4efb\u4f55\u5176\u5b83\u7c7b\u578b\uff0c\u53ea\u8981\u5b83\u6709year,month...\u7b49 \u5c5e\u6027 Examples: >>> TimeFrame.time2int(datetime.datetime(2020, 5, 1, 15)) 202005011500 Args: tm: Returns: \u8f6c\u6362\u540e\u7684\u6574\u6570\uff0c\u6bd4\u59822020050115 \"\"\" return int ( f \" { tm . year : 04 }{ tm . month : 02 }{ tm . day : 02 }{ tm . hour : 02 }{ tm . minute : 02 } \" )","title":"time2int()"},{"location":"api/timeframe/#omicron.models.timeframe.TimeFrame.week_shift","text":"\u5bf9\u6307\u5b9a\u65e5\u671f\u6309\u5468\u7ebf\u5e27\u8fdb\u884c\u524d\u540e\u79fb\u4f4d\u64cd\u4f5c \u53c2\u8003 omicron.models.timeframe.TimeFrame.day_shift Examples: >>> TimeFrame . week_frames = np . array ([ 20200103 , 20200110 , 20200117 , 20200123 , 20200207 , 20200214 ]) >>> moment = arrow . get ( '2020-1-21' ) . date () >>> TimeFrame . week_shift ( moment , 1 ) datetime . date ( 2020 , 1 , 23 ) >>> TimeFrame . week_shift ( moment , 0 ) datetime . date ( 2020 , 1 , 17 ) >>> TimeFrame . week_shift ( moment , - 1 ) datetime . date ( 2020 , 1 , 10 ) Returns: Type Description datetime.date \u79fb\u4f4d\u540e\u7684\u65e5\u671f Source code in omicron/models/timeframe.py @classmethod def week_shift ( cls , start : datetime . date , offset : int ) -> datetime . date : \"\"\"\u5bf9\u6307\u5b9a\u65e5\u671f\u6309\u5468\u7ebf\u5e27\u8fdb\u884c\u524d\u540e\u79fb\u4f4d\u64cd\u4f5c \u53c2\u8003 [omicron.models.timeframe.TimeFrame.day_shift][] Examples: >>> TimeFrame.week_frames = np.array([20200103, 20200110, 20200117, 20200123,20200207, 20200214]) >>> moment = arrow.get('2020-1-21').date() >>> TimeFrame.week_shift(moment, 1) datetime.date(2020, 1, 23) >>> TimeFrame.week_shift(moment, 0) datetime.date(2020, 1, 17) >>> TimeFrame.week_shift(moment, -1) datetime.date(2020, 1, 10) Returns: \u79fb\u4f4d\u540e\u7684\u65e5\u671f \"\"\" start = cls . date2int ( start ) return cls . int2date ( ext . shift ( cls . week_frames , start , offset )) rendering: heading_level: 1","title":"week_shift()"},{"location":"api/triggers/","text":"\u5728apscheduler.triggers\u7684\u57fa\u7840\u4e0a\u63d0\u4f9b\u4e86FrameTrigger\u548cIntervalTrigger\uff0c\u4f7f\u5f97\u5b83\u4eec\u53ea\u5728\u4ea4\u6613\u65e5\uff08\u6216\u8005 \u57fa\u4e8e\u4ea4\u6613\u65e5+\u5ef6\u65f6\uff09\u65f6\u6fc0\u53d1\u3002 FrameTrigger ( BaseTrigger ) \u00b6 A cron like trigger fires on each valid Frame Source code in omicron/core/triggers.py class FrameTrigger ( BaseTrigger ): \"\"\" A cron like trigger fires on each valid Frame \"\"\" def __init__ ( self , frame_type : Union [ str , FrameType ], jitter : str = None ): \"\"\"\u6784\u9020\u51fd\u6570 jitter\u7684\u683c\u5f0f\u7528\u6b63\u5219\u5f0f\u8868\u8fbe\u4e3a`r\"([-]?)(\\\\d+)([mshd])\"`\uff0c\u5176\u4e2d\u7b2c\u4e00\u7ec4\u4e3a\u7b26\u53f7\uff0c'-'\u8868\u793a\u63d0\u524d\uff1b \u7b2c\u4e8c\u7ec4\u4e3a\u6570\u5b57\uff0c\u7b2c\u4e09\u7ec4\u4e3a\u5355\u4f4d\uff0c\u53ef\u4ee5\u4e3a`m`(\u5206\u949f), `s`(\u79d2), `h`\uff08\u5c0f\u65f6\uff09,`d`(\u5929)\u3002 \u4e0b\u9762\u7684\u793a\u4f8b\u6784\u9020\u4e86\u4e00\u4e2a\u53ea\u5728\u4ea4\u6613\u65e5\uff0c\u6bcf30\u5206\u949f\u89e6\u53d1\u4e00\u6b21\uff0c\u6bcf\u6b21\u63d0\u524d15\u79d2\u89e6\u7684trigger\u3002\u5373\u5b83\u7684\u89e6\u53d1\u65f6 \u95f4\u662f\u6bcf\u4e2a\u4ea4\u6613\u65e5\u768409:29:45, 09:59:45, ... 
Examples: >>> FrameTrigger(FrameType.MIN30, '-15s') # doctest: +ELLIPSIS Args: frame_type: jitter: \u5355\u4f4d\u79d2\u3002\u5176\u4e2doffset\u5fc5\u987b\u5728\u4e00\u4e2aFrameType\u7684\u957f\u5ea6\u4ee5\u5185 \"\"\" self . frame_type = FrameType ( frame_type ) if jitter is None : _jitter = 0 else : matched = re . match ( r \"([-]?)(\\d+)([mshd])\" , jitter ) if matched is None : # pragma: no cover raise ValueError ( \"malformed. jitter should be [-](number)(unit), \" \"for example, -30m, or 30s\" ) sign , num , unit = matched . groups () num = int ( num ) if unit . lower () == \"m\" : _jitter = 60 * num elif unit . lower () == \"s\" : _jitter = num elif unit . lower () == \"h\" : _jitter = 3600 * num elif unit . lower () == \"d\" : _jitter = 3600 * 24 * num else : # pragma: no cover raise ValueError ( \"bad time unit. only s,h,m,d is acceptable\" ) if sign == \"-\" : _jitter = - _jitter self . jitter = datetime . timedelta ( seconds = _jitter ) if ( frame_type == FrameType . MIN1 and abs ( _jitter ) >= 60 or frame_type == FrameType . MIN5 and abs ( _jitter ) >= 300 or frame_type == FrameType . MIN15 and abs ( _jitter ) >= 900 or frame_type == FrameType . MIN30 and abs ( _jitter ) >= 1800 or frame_type == FrameType . MIN60 and abs ( _jitter ) >= 3600 or frame_type == FrameType . DAY and abs ( _jitter ) >= 24 * 3600 # it's still not allowed if offset > week, month, etc. Would anybody # really specify an offset longer than that? ): raise ValueError ( \"offset must be less than frame length\" ) def __str__ ( self ): return f \" { self . __class__ . __name__ } : { self . frame_type . value } : { self . jitter } \" def get_next_fire_time ( self , previous_fire_time : Union [ datetime . date , datetime . datetime ], now : Union [ datetime . date , datetime . datetime ], ): \"\"\"\"\"\" ft = self . frame_type # `now` is timezone aware, while ceiling isn't now = now . replace ( tzinfo = None ) next_tick = now next_frame = TimeFrame . ceiling ( now , ft ) while next_tick <= now : if ft in TimeFrame . day_level_frames : next_tick = TimeFrame . combine_time ( next_frame , 15 ) + self . jitter else : next_tick = next_frame + self . jitter if next_tick > now : tz = tzlocal . get_localzone () return next_tick . astimezone ( tz ) else : next_frame = TimeFrame . shift ( next_frame , 1 , ft ) __init__ ( self , frame_type , jitter = None ) special \u00b6 \u6784\u9020\u51fd\u6570 jitter\u7684\u683c\u5f0f\u7528\u6b63\u5219\u5f0f\u8868\u8fbe\u4e3a r\"([-]?)(\\d+)([mshd])\" \uff0c\u5176\u4e2d\u7b2c\u4e00\u7ec4\u4e3a\u7b26\u53f7\uff0c'-'\u8868\u793a\u63d0\u524d\uff1b \u7b2c\u4e8c\u7ec4\u4e3a\u6570\u5b57\uff0c\u7b2c\u4e09\u7ec4\u4e3a\u5355\u4f4d\uff0c\u53ef\u4ee5\u4e3a m (\u5206\u949f), s (\u79d2), h \uff08\u5c0f\u65f6\uff09, d (\u5929)\u3002 \u4e0b\u9762\u7684\u793a\u4f8b\u6784\u9020\u4e86\u4e00\u4e2a\u53ea\u5728\u4ea4\u6613\u65e5\uff0c\u6bcf30\u5206\u949f\u89e6\u53d1\u4e00\u6b21\uff0c\u6bcf\u6b21\u63d0\u524d15\u79d2\u89e6\u7684trigger\u3002\u5373\u5b83\u7684\u89e6\u53d1\u65f6 \u95f4\u662f\u6bcf\u4e2a\u4ea4\u6613\u65e5\u768409:29:45, 09:59:45, ... Examples: >>> FrameTrigger ( FrameType . MIN30 , '-15s' ) < omicron . core . triggers . 
FrameTrigger object at 0 x ...> Parameters: Name Type Description Default frame_type Union[str, coretypes.types.FrameType] required jitter str \u5355\u4f4d\u79d2\u3002\u5176\u4e2doffset\u5fc5\u987b\u5728\u4e00\u4e2aFrameType\u7684\u957f\u5ea6\u4ee5\u5185 None Source code in omicron/core/triggers.py def __init__ ( self , frame_type : Union [ str , FrameType ], jitter : str = None ): \"\"\"\u6784\u9020\u51fd\u6570 jitter\u7684\u683c\u5f0f\u7528\u6b63\u5219\u5f0f\u8868\u8fbe\u4e3a`r\"([-]?)(\\\\d+)([mshd])\"`\uff0c\u5176\u4e2d\u7b2c\u4e00\u7ec4\u4e3a\u7b26\u53f7\uff0c'-'\u8868\u793a\u63d0\u524d\uff1b \u7b2c\u4e8c\u7ec4\u4e3a\u6570\u5b57\uff0c\u7b2c\u4e09\u7ec4\u4e3a\u5355\u4f4d\uff0c\u53ef\u4ee5\u4e3a`m`(\u5206\u949f), `s`(\u79d2), `h`\uff08\u5c0f\u65f6\uff09,`d`(\u5929)\u3002 \u4e0b\u9762\u7684\u793a\u4f8b\u6784\u9020\u4e86\u4e00\u4e2a\u53ea\u5728\u4ea4\u6613\u65e5\uff0c\u6bcf30\u5206\u949f\u89e6\u53d1\u4e00\u6b21\uff0c\u6bcf\u6b21\u63d0\u524d15\u79d2\u89e6\u7684trigger\u3002\u5373\u5b83\u7684\u89e6\u53d1\u65f6 \u95f4\u662f\u6bcf\u4e2a\u4ea4\u6613\u65e5\u768409:29:45, 09:59:45, ... Examples: >>> FrameTrigger(FrameType.MIN30, '-15s') # doctest: +ELLIPSIS Args: frame_type: jitter: \u5355\u4f4d\u79d2\u3002\u5176\u4e2doffset\u5fc5\u987b\u5728\u4e00\u4e2aFrameType\u7684\u957f\u5ea6\u4ee5\u5185 \"\"\" self . frame_type = FrameType ( frame_type ) if jitter is None : _jitter = 0 else : matched = re . match ( r \"([-]?)(\\d+)([mshd])\" , jitter ) if matched is None : # pragma: no cover raise ValueError ( \"malformed. jitter should be [-](number)(unit), \" \"for example, -30m, or 30s\" ) sign , num , unit = matched . groups () num = int ( num ) if unit . lower () == \"m\" : _jitter = 60 * num elif unit . lower () == \"s\" : _jitter = num elif unit . lower () == \"h\" : _jitter = 3600 * num elif unit . lower () == \"d\" : _jitter = 3600 * 24 * num else : # pragma: no cover raise ValueError ( \"bad time unit. only s,h,m,d is acceptable\" ) if sign == \"-\" : _jitter = - _jitter self . jitter = datetime . timedelta ( seconds = _jitter ) if ( frame_type == FrameType . MIN1 and abs ( _jitter ) >= 60 or frame_type == FrameType . MIN5 and abs ( _jitter ) >= 300 or frame_type == FrameType . MIN15 and abs ( _jitter ) >= 900 or frame_type == FrameType . MIN30 and abs ( _jitter ) >= 1800 or frame_type == FrameType . MIN60 and abs ( _jitter ) >= 3600 or frame_type == FrameType . DAY and abs ( _jitter ) >= 24 * 3600 # it's still not allowed if offset > week, month, etc. Would anybody # really specify an offset longer than that? ): raise ValueError ( \"offset must be less than frame length\" ) TradeTimeIntervalTrigger ( BaseTrigger ) \u00b6 \u53ea\u5728\u4ea4\u6613\u65f6\u95f4\u89e6\u53d1\u7684\u56fa\u5b9a\u95f4\u9694\u7684trigger Source code in omicron/core/triggers.py class TradeTimeIntervalTrigger ( BaseTrigger ): \"\"\"\u53ea\u5728\u4ea4\u6613\u65f6\u95f4\u89e6\u53d1\u7684\u56fa\u5b9a\u95f4\u9694\u7684trigger\"\"\" def __init__ ( self , interval : str ): \"\"\"\u6784\u9020\u51fd\u6570 interval\u7684\u683c\u5f0f\u7528\u6b63\u5219\u8868\u8fbe\u5f0f\u8868\u793a\u4e3a `r\"(\\\\d+)([mshd])\"` \u3002\u5176\u4e2d\u7b2c\u4e00\u7ec4\u4e3a\u6570\u5b57\uff0c\u7b2c\u4e8c\u7ec4\u4e3a\u5355\u4f4d\u3002\u6709\u6548\u7684 `interval`\u5982 1 \uff0c\u8868\u793a\u6bcf1\u5c0f\u65f6\u89e6\u53d1\u4e00\u6b21\uff0c\u5219\u8be5\u89e6\u53d1\u5668\u5c06\u5728\u4ea4\u6613\u65e5\u768410:30, 11:30, 14:00\u548c 15\uff1a00\u5404\u89e6\u53d1\u4e00\u6b21 Args: interval : [description] Raises: ValueError: [description] \"\"\" matched = re . 
match ( r \"(\\d+)([mshd])\" , interval ) if matched is None : raise ValueError ( f \"malform interval { interval } \" ) interval , unit = matched . groups () interval = int ( interval ) unit = unit . lower () if unit == \"s\" : self . interval = datetime . timedelta ( seconds = interval ) elif unit == \"m\" : self . interval = datetime . timedelta ( minutes = interval ) elif unit == \"h\" : self . interval = datetime . timedelta ( hours = interval ) elif unit == \"d\" : self . interval = datetime . timedelta ( days = interval ) else : self . interval = datetime . timedelta ( seconds = interval ) def __str__ ( self ): return f \" { self . __class__ . __name__ } : { self . interval . seconds } \" def get_next_fire_time ( self , previous_fire_time : Optional [ datetime . datetime ], now : Optional [ datetime . datetime ], ): \"\"\"\"\"\" if previous_fire_time is not None : fire_time = previous_fire_time + self . interval else : fire_time = now if TimeFrame . date2int ( fire_time . date ()) not in TimeFrame . day_frames : ft = TimeFrame . day_shift ( now , 1 ) fire_time = datetime . datetime ( ft . year , ft . month , ft . day , 9 , 30 , tzinfo = fire_time . tzinfo ) return fire_time minutes = fire_time . hour * 60 + fire_time . minute if minutes < 570 : fire_time = fire_time . replace ( hour = 9 , minute = 30 , second = 0 , microsecond = 0 ) elif 690 < minutes < 780 : fire_time = fire_time . replace ( hour = 13 , minute = 0 , second = 0 , microsecond = 0 ) elif minutes > 900 : ft = TimeFrame . day_shift ( fire_time , 1 ) fire_time = datetime . datetime ( ft . year , ft . month , ft . day , 9 , 30 , tzinfo = fire_time . tzinfo ) return fire_time __init__ ( self , interval ) special \u00b6 \u6784\u9020\u51fd\u6570 interval\u7684\u683c\u5f0f\u7528\u6b63\u5219\u8868\u8fbe\u5f0f\u8868\u793a\u4e3a r\"(\\d+)([mshd])\" \u3002\u5176\u4e2d\u7b2c\u4e00\u7ec4\u4e3a\u6570\u5b57\uff0c\u7b2c\u4e8c\u7ec4\u4e3a\u5355\u4f4d\u3002\u6709\u6548\u7684 interval \u5982 1 \uff0c\u8868\u793a\u6bcf1\u5c0f\u65f6\u89e6\u53d1\u4e00\u6b21\uff0c\u5219\u8be5\u89e6\u53d1\u5668\u5c06\u5728\u4ea4\u6613\u65e5\u768410:30, 11:30, 14:00\u548c 15\uff1a00\u5404\u89e6\u53d1\u4e00\u6b21 Parameters: Name Type Description Default interval [description] required Exceptions: Type Description ValueError [description] Source code in omicron/core/triggers.py def __init__ ( self , interval : str ): \"\"\"\u6784\u9020\u51fd\u6570 interval\u7684\u683c\u5f0f\u7528\u6b63\u5219\u8868\u8fbe\u5f0f\u8868\u793a\u4e3a `r\"(\\\\d+)([mshd])\"` \u3002\u5176\u4e2d\u7b2c\u4e00\u7ec4\u4e3a\u6570\u5b57\uff0c\u7b2c\u4e8c\u7ec4\u4e3a\u5355\u4f4d\u3002\u6709\u6548\u7684 `interval`\u5982 1 \uff0c\u8868\u793a\u6bcf1\u5c0f\u65f6\u89e6\u53d1\u4e00\u6b21\uff0c\u5219\u8be5\u89e6\u53d1\u5668\u5c06\u5728\u4ea4\u6613\u65e5\u768410:30, 11:30, 14:00\u548c 15\uff1a00\u5404\u89e6\u53d1\u4e00\u6b21 Args: interval : [description] Raises: ValueError: [description] \"\"\" matched = re . match ( r \"(\\d+)([mshd])\" , interval ) if matched is None : raise ValueError ( f \"malform interval { interval } \" ) interval , unit = matched . groups () interval = int ( interval ) unit = unit . lower () if unit == \"s\" : self . interval = datetime . timedelta ( seconds = interval ) elif unit == \"m\" : self . interval = datetime . timedelta ( minutes = interval ) elif unit == \"h\" : self . interval = datetime . timedelta ( hours = interval ) elif unit == \"d\" : self . interval = datetime . timedelta ( days = interval ) else : self . interval = datetime . 
timedelta ( seconds = interval )","title":"Triggers"},{"location":"api/triggers/#omicron.core.triggers.FrameTrigger","text":"A cron like trigger fires on each valid Frame Source code in omicron/core/triggers.py class FrameTrigger ( BaseTrigger ): \"\"\" A cron like trigger fires on each valid Frame \"\"\" def __init__ ( self , frame_type : Union [ str , FrameType ], jitter : str = None ): \"\"\"\u6784\u9020\u51fd\u6570 jitter\u7684\u683c\u5f0f\u7528\u6b63\u5219\u5f0f\u8868\u8fbe\u4e3a`r\"([-]?)(\\\\d+)([mshd])\"`\uff0c\u5176\u4e2d\u7b2c\u4e00\u7ec4\u4e3a\u7b26\u53f7\uff0c'-'\u8868\u793a\u63d0\u524d\uff1b \u7b2c\u4e8c\u7ec4\u4e3a\u6570\u5b57\uff0c\u7b2c\u4e09\u7ec4\u4e3a\u5355\u4f4d\uff0c\u53ef\u4ee5\u4e3a`m`(\u5206\u949f), `s`(\u79d2), `h`\uff08\u5c0f\u65f6\uff09,`d`(\u5929)\u3002 \u4e0b\u9762\u7684\u793a\u4f8b\u6784\u9020\u4e86\u4e00\u4e2a\u53ea\u5728\u4ea4\u6613\u65e5\uff0c\u6bcf30\u5206\u949f\u89e6\u53d1\u4e00\u6b21\uff0c\u6bcf\u6b21\u63d0\u524d15\u79d2\u89e6\u7684trigger\u3002\u5373\u5b83\u7684\u89e6\u53d1\u65f6 \u95f4\u662f\u6bcf\u4e2a\u4ea4\u6613\u65e5\u768409:29:45, 09:59:45, ... Examples: >>> FrameTrigger(FrameType.MIN30, '-15s') # doctest: +ELLIPSIS Args: frame_type: jitter: \u5355\u4f4d\u79d2\u3002\u5176\u4e2doffset\u5fc5\u987b\u5728\u4e00\u4e2aFrameType\u7684\u957f\u5ea6\u4ee5\u5185 \"\"\" self . frame_type = FrameType ( frame_type ) if jitter is None : _jitter = 0 else : matched = re . match ( r \"([-]?)(\\d+)([mshd])\" , jitter ) if matched is None : # pragma: no cover raise ValueError ( \"malformed. jitter should be [-](number)(unit), \" \"for example, -30m, or 30s\" ) sign , num , unit = matched . groups () num = int ( num ) if unit . lower () == \"m\" : _jitter = 60 * num elif unit . lower () == \"s\" : _jitter = num elif unit . lower () == \"h\" : _jitter = 3600 * num elif unit . lower () == \"d\" : _jitter = 3600 * 24 * num else : # pragma: no cover raise ValueError ( \"bad time unit. only s,h,m,d is acceptable\" ) if sign == \"-\" : _jitter = - _jitter self . jitter = datetime . timedelta ( seconds = _jitter ) if ( frame_type == FrameType . MIN1 and abs ( _jitter ) >= 60 or frame_type == FrameType . MIN5 and abs ( _jitter ) >= 300 or frame_type == FrameType . MIN15 and abs ( _jitter ) >= 900 or frame_type == FrameType . MIN30 and abs ( _jitter ) >= 1800 or frame_type == FrameType . MIN60 and abs ( _jitter ) >= 3600 or frame_type == FrameType . DAY and abs ( _jitter ) >= 24 * 3600 # it's still not allowed if offset > week, month, etc. Would anybody # really specify an offset longer than that? ): raise ValueError ( \"offset must be less than frame length\" ) def __str__ ( self ): return f \" { self . __class__ . __name__ } : { self . frame_type . value } : { self . jitter } \" def get_next_fire_time ( self , previous_fire_time : Union [ datetime . date , datetime . datetime ], now : Union [ datetime . date , datetime . datetime ], ): \"\"\"\"\"\" ft = self . frame_type # `now` is timezone aware, while ceiling isn't now = now . replace ( tzinfo = None ) next_tick = now next_frame = TimeFrame . ceiling ( now , ft ) while next_tick <= now : if ft in TimeFrame . day_level_frames : next_tick = TimeFrame . combine_time ( next_frame , 15 ) + self . jitter else : next_tick = next_frame + self . jitter if next_tick > now : tz = tzlocal . get_localzone () return next_tick . astimezone ( tz ) else : next_frame = TimeFrame . 
shift ( next_frame , 1 , ft )","title":"FrameTrigger"},{"location":"api/triggers/#omicron.core.triggers.FrameTrigger.__init__","text":"\u6784\u9020\u51fd\u6570 jitter\u7684\u683c\u5f0f\u7528\u6b63\u5219\u5f0f\u8868\u8fbe\u4e3a r\"([-]?)(\\d+)([mshd])\" \uff0c\u5176\u4e2d\u7b2c\u4e00\u7ec4\u4e3a\u7b26\u53f7\uff0c'-'\u8868\u793a\u63d0\u524d\uff1b \u7b2c\u4e8c\u7ec4\u4e3a\u6570\u5b57\uff0c\u7b2c\u4e09\u7ec4\u4e3a\u5355\u4f4d\uff0c\u53ef\u4ee5\u4e3a m (\u5206\u949f), s (\u79d2), h \uff08\u5c0f\u65f6\uff09, d (\u5929)\u3002 \u4e0b\u9762\u7684\u793a\u4f8b\u6784\u9020\u4e86\u4e00\u4e2a\u53ea\u5728\u4ea4\u6613\u65e5\uff0c\u6bcf30\u5206\u949f\u89e6\u53d1\u4e00\u6b21\uff0c\u6bcf\u6b21\u63d0\u524d15\u79d2\u89e6\u7684trigger\u3002\u5373\u5b83\u7684\u89e6\u53d1\u65f6 \u95f4\u662f\u6bcf\u4e2a\u4ea4\u6613\u65e5\u768409:29:45, 09:59:45, ... Examples: >>> FrameTrigger ( FrameType . MIN30 , '-15s' ) < omicron . core . triggers . FrameTrigger object at 0 x ...> Parameters: Name Type Description Default frame_type Union[str, coretypes.types.FrameType] required jitter str \u5355\u4f4d\u79d2\u3002\u5176\u4e2doffset\u5fc5\u987b\u5728\u4e00\u4e2aFrameType\u7684\u957f\u5ea6\u4ee5\u5185 None Source code in omicron/core/triggers.py def __init__ ( self , frame_type : Union [ str , FrameType ], jitter : str = None ): \"\"\"\u6784\u9020\u51fd\u6570 jitter\u7684\u683c\u5f0f\u7528\u6b63\u5219\u5f0f\u8868\u8fbe\u4e3a`r\"([-]?)(\\\\d+)([mshd])\"`\uff0c\u5176\u4e2d\u7b2c\u4e00\u7ec4\u4e3a\u7b26\u53f7\uff0c'-'\u8868\u793a\u63d0\u524d\uff1b \u7b2c\u4e8c\u7ec4\u4e3a\u6570\u5b57\uff0c\u7b2c\u4e09\u7ec4\u4e3a\u5355\u4f4d\uff0c\u53ef\u4ee5\u4e3a`m`(\u5206\u949f), `s`(\u79d2), `h`\uff08\u5c0f\u65f6\uff09,`d`(\u5929)\u3002 \u4e0b\u9762\u7684\u793a\u4f8b\u6784\u9020\u4e86\u4e00\u4e2a\u53ea\u5728\u4ea4\u6613\u65e5\uff0c\u6bcf30\u5206\u949f\u89e6\u53d1\u4e00\u6b21\uff0c\u6bcf\u6b21\u63d0\u524d15\u79d2\u89e6\u7684trigger\u3002\u5373\u5b83\u7684\u89e6\u53d1\u65f6 \u95f4\u662f\u6bcf\u4e2a\u4ea4\u6613\u65e5\u768409:29:45, 09:59:45, ... Examples: >>> FrameTrigger(FrameType.MIN30, '-15s') # doctest: +ELLIPSIS Args: frame_type: jitter: \u5355\u4f4d\u79d2\u3002\u5176\u4e2doffset\u5fc5\u987b\u5728\u4e00\u4e2aFrameType\u7684\u957f\u5ea6\u4ee5\u5185 \"\"\" self . frame_type = FrameType ( frame_type ) if jitter is None : _jitter = 0 else : matched = re . match ( r \"([-]?)(\\d+)([mshd])\" , jitter ) if matched is None : # pragma: no cover raise ValueError ( \"malformed. jitter should be [-](number)(unit), \" \"for example, -30m, or 30s\" ) sign , num , unit = matched . groups () num = int ( num ) if unit . lower () == \"m\" : _jitter = 60 * num elif unit . lower () == \"s\" : _jitter = num elif unit . lower () == \"h\" : _jitter = 3600 * num elif unit . lower () == \"d\" : _jitter = 3600 * 24 * num else : # pragma: no cover raise ValueError ( \"bad time unit. only s,h,m,d is acceptable\" ) if sign == \"-\" : _jitter = - _jitter self . jitter = datetime . timedelta ( seconds = _jitter ) if ( frame_type == FrameType . MIN1 and abs ( _jitter ) >= 60 or frame_type == FrameType . MIN5 and abs ( _jitter ) >= 300 or frame_type == FrameType . MIN15 and abs ( _jitter ) >= 900 or frame_type == FrameType . MIN30 and abs ( _jitter ) >= 1800 or frame_type == FrameType . MIN60 and abs ( _jitter ) >= 3600 or frame_type == FrameType . DAY and abs ( _jitter ) >= 24 * 3600 # it's still not allowed if offset > week, month, etc. Would anybody # really specify an offset longer than that? 
): raise ValueError ( \"offset must be less than frame length\" )","title":"__init__()"},{"location":"api/triggers/#omicron.core.triggers.TradeTimeIntervalTrigger","text":"\u53ea\u5728\u4ea4\u6613\u65f6\u95f4\u89e6\u53d1\u7684\u56fa\u5b9a\u95f4\u9694\u7684trigger Source code in omicron/core/triggers.py class TradeTimeIntervalTrigger ( BaseTrigger ): \"\"\"\u53ea\u5728\u4ea4\u6613\u65f6\u95f4\u89e6\u53d1\u7684\u56fa\u5b9a\u95f4\u9694\u7684trigger\"\"\" def __init__ ( self , interval : str ): \"\"\"\u6784\u9020\u51fd\u6570 interval\u7684\u683c\u5f0f\u7528\u6b63\u5219\u8868\u8fbe\u5f0f\u8868\u793a\u4e3a `r\"(\\\\d+)([mshd])\"` \u3002\u5176\u4e2d\u7b2c\u4e00\u7ec4\u4e3a\u6570\u5b57\uff0c\u7b2c\u4e8c\u7ec4\u4e3a\u5355\u4f4d\u3002\u6709\u6548\u7684 `interval`\u5982 1 \uff0c\u8868\u793a\u6bcf1\u5c0f\u65f6\u89e6\u53d1\u4e00\u6b21\uff0c\u5219\u8be5\u89e6\u53d1\u5668\u5c06\u5728\u4ea4\u6613\u65e5\u768410:30, 11:30, 14:00\u548c 15\uff1a00\u5404\u89e6\u53d1\u4e00\u6b21 Args: interval : [description] Raises: ValueError: [description] \"\"\" matched = re . match ( r \"(\\d+)([mshd])\" , interval ) if matched is None : raise ValueError ( f \"malform interval { interval } \" ) interval , unit = matched . groups () interval = int ( interval ) unit = unit . lower () if unit == \"s\" : self . interval = datetime . timedelta ( seconds = interval ) elif unit == \"m\" : self . interval = datetime . timedelta ( minutes = interval ) elif unit == \"h\" : self . interval = datetime . timedelta ( hours = interval ) elif unit == \"d\" : self . interval = datetime . timedelta ( days = interval ) else : self . interval = datetime . timedelta ( seconds = interval ) def __str__ ( self ): return f \" { self . __class__ . __name__ } : { self . interval . seconds } \" def get_next_fire_time ( self , previous_fire_time : Optional [ datetime . datetime ], now : Optional [ datetime . datetime ], ): \"\"\"\"\"\" if previous_fire_time is not None : fire_time = previous_fire_time + self . interval else : fire_time = now if TimeFrame . date2int ( fire_time . date ()) not in TimeFrame . day_frames : ft = TimeFrame . day_shift ( now , 1 ) fire_time = datetime . datetime ( ft . year , ft . month , ft . day , 9 , 30 , tzinfo = fire_time . tzinfo ) return fire_time minutes = fire_time . hour * 60 + fire_time . minute if minutes < 570 : fire_time = fire_time . replace ( hour = 9 , minute = 30 , second = 0 , microsecond = 0 ) elif 690 < minutes < 780 : fire_time = fire_time . replace ( hour = 13 , minute = 0 , second = 0 , microsecond = 0 ) elif minutes > 900 : ft = TimeFrame . day_shift ( fire_time , 1 ) fire_time = datetime . datetime ( ft . year , ft . month , ft . day , 9 , 30 , tzinfo = fire_time . 
tzinfo ) return fire_time","title":"TradeTimeIntervalTrigger"},{"location":"api/triggers/#omicron.core.triggers.TradeTimeIntervalTrigger.__init__","text":"\u6784\u9020\u51fd\u6570 interval\u7684\u683c\u5f0f\u7528\u6b63\u5219\u8868\u8fbe\u5f0f\u8868\u793a\u4e3a r\"(\\d+)([mshd])\" \u3002\u5176\u4e2d\u7b2c\u4e00\u7ec4\u4e3a\u6570\u5b57\uff0c\u7b2c\u4e8c\u7ec4\u4e3a\u5355\u4f4d\u3002\u6709\u6548\u7684 interval \u5982 1 \uff0c\u8868\u793a\u6bcf1\u5c0f\u65f6\u89e6\u53d1\u4e00\u6b21\uff0c\u5219\u8be5\u89e6\u53d1\u5668\u5c06\u5728\u4ea4\u6613\u65e5\u768410:30, 11:30, 14:00\u548c 15\uff1a00\u5404\u89e6\u53d1\u4e00\u6b21 Parameters: Name Type Description Default interval [description] required Exceptions: Type Description ValueError [description] Source code in omicron/core/triggers.py def __init__ ( self , interval : str ): \"\"\"\u6784\u9020\u51fd\u6570 interval\u7684\u683c\u5f0f\u7528\u6b63\u5219\u8868\u8fbe\u5f0f\u8868\u793a\u4e3a `r\"(\\\\d+)([mshd])\"` \u3002\u5176\u4e2d\u7b2c\u4e00\u7ec4\u4e3a\u6570\u5b57\uff0c\u7b2c\u4e8c\u7ec4\u4e3a\u5355\u4f4d\u3002\u6709\u6548\u7684 `interval`\u5982 1 \uff0c\u8868\u793a\u6bcf1\u5c0f\u65f6\u89e6\u53d1\u4e00\u6b21\uff0c\u5219\u8be5\u89e6\u53d1\u5668\u5c06\u5728\u4ea4\u6613\u65e5\u768410:30, 11:30, 14:00\u548c 15\uff1a00\u5404\u89e6\u53d1\u4e00\u6b21 Args: interval : [description] Raises: ValueError: [description] \"\"\" matched = re . match ( r \"(\\d+)([mshd])\" , interval ) if matched is None : raise ValueError ( f \"malform interval { interval } \" ) interval , unit = matched . groups () interval = int ( interval ) unit = unit . lower () if unit == \"s\" : self . interval = datetime . timedelta ( seconds = interval ) elif unit == \"m\" : self . interval = datetime . timedelta ( minutes = interval ) elif unit == \"h\" : self . interval = datetime . timedelta ( hours = interval ) elif unit == \"d\" : self . interval = datetime . timedelta ( days = interval ) else : self . interval = datetime . timedelta ( seconds = interval )","title":"__init__()"},{"location":"api/dal/flux/","text":"Flux - the query language builder for influxdb \u00b6 Helper functions for building flux query expression Source code in omicron/dal/influx/flux.py class Flux ( object ): \"\"\"Helper functions for building flux query expression\"\"\" EPOCH_START = datetime . datetime ( 1970 , 1 , 1 , 0 , 0 , 0 ) def __init__ ( self , auto_pivot = True , no_sys_cols = True ): \"\"\"\u521d\u59cb\u5316Flux\u5bf9\u8c61 Args: auto_pivot : \u662f\u5426\u81ea\u52a8\u5c06\u67e5\u8be2\u5217\u5b57\u6bb5\u7ec4\u88c5\u6210\u884c. Defaults to True. no_sys_cols: \u662f\u5426\u81ea\u52a8\u5c06\u7cfb\u7edf\u5b57\u6bb5\u5220\u9664. Defaults to True.\u8bf7\u53c2\u8003[drop_sys_cols][omicron.dal.influx.flux.Flux.drop_sys_cols] \"\"\" self . _cols = None self . expressions = defaultdict ( list ) self . _auto_pivot = auto_pivot self . _last_n = None self . no_sys_cols = no_sys_cols def __str__ ( self ): return self . _compose () def __repr__ ( self ) -> str : return f \"< { self . __class__ . __name__ } >: \\n { self . _compose () } \" def _compose ( self ): \"\"\"\u5c06\u6240\u6709\u8868\u8fbe\u5f0f\u5408\u5e76\u4e3a\u4e00\u4e2a\u8868\u8fbe\u5f0f\"\"\" if not all ( [ \"bucket\" in self . expressions , \"measurement\" in self . expressions , \"range\" in self . expressions , ] ): raise AssertionError ( \"bucket, measurement and range must be set\" ) expr = [ self . expressions [ k ] for k in ( \"bucket\" , \"range\" , \"measurement\" )] if self . expressions . get ( \"tags\" ): expr . append ( self . 
expressions [ \"tags\" ]) if self . expressions . get ( \"fields\" ): expr . append ( self . expressions [ \"fields\" ]) if \"drop\" not in self . expressions and self . no_sys_cols : self . drop_sys_cols () if self . expressions . get ( \"drop\" ): expr . append ( self . expressions [ \"drop\" ]) if self . _auto_pivot and \"pivot\" not in self . expressions : self . pivot () if self . expressions . get ( \"pivot\" ): expr . append ( self . expressions [ \"pivot\" ]) if self . expressions . get ( \"group\" ): expr . append ( self . expressions [ \"group\" ]) if self . expressions . get ( \"sort\" ): expr . append ( self . expressions [ \"sort\" ]) if self . expressions . get ( \"limit\" ): expr . append ( self . expressions [ \"limit\" ]) # influxdb\u9ed8\u8ba4\u6309\u5347\u5e8f\u6392\u5217\uff0c\u4f46last_n\u67e5\u8be2\u7684\u7ed3\u679c\u5219\u5fc5\u7136\u662f\u964d\u5e8f\u7684\uff0c\u6240\u4ee5\u8fd8\u9700\u8981\u518d\u6b21\u6392\u5e8f if self . _last_n : expr . append ( \" \\n \" . join ( [ f ' |> top(n: { self . _last_n } , columns: [\"_time\"])' , ' |> sort(columns: [\"_time\"], desc: false)' , ] ) ) return \" \\n \" . join ( expr ) def bucket ( self , bucket : str ) -> \"Flux\" : \"\"\"add bucket to query expression Raises: DuplicateOperationError: \u4e00\u4e2a\u67e5\u8be2\u4e2d\u53ea\u5141\u8bb8\u6307\u5b9a\u4e00\u4e2asource\uff0c\u5982\u679c\u8868\u8fbe\u5f0f\u4e2d\u5df2\u7ecf\u6307\u5b9a\u4e86bucket\uff0c\u5219\u629b\u51fa\u5f02\u5e38 Returns: Flux\u5bf9\u8c61 \"\"\" if \"bucket\" in self . expressions : raise DuplicateOperationError ( \"bucket has been set\" ) self . expressions [ \"bucket\" ] = f 'from(bucket: \" { bucket } \")' return self def measurement ( self , measurement : str ) -> \"Flux\" : \"\"\"add measurement filter to query Raises: DuplicateOperationError: \u4e00\u6b21\u67e5\u8be2\u4e2d\u53ea\u5141\u8bb8\u6307\u5b9a\u4e00\u4e2ameasurement, \u5982\u679c\u8868\u8fbe\u5f0f\u4e2d\u5df2\u7ecf\u5b58\u5728measurement, \u5219\u629b\u51fa\u5f02\u5e38 Returns: Flux\u5bf9\u8c61\u81ea\u8eab\uff0c\u4ee5\u4fbf\u8fdb\u884c\u7ba1\u9053\u64cd\u4f5c \"\"\" if \"measurement\" in self . expressions : raise DuplicateOperationError ( \"measurement has been set\" ) self . 
expressions [ \"measurement\" ] = f ' |> filter(fn: (r) => r[\"_measurement\"] == \" { measurement } \")' return self def range ( self , start : Frame , end : Frame , right_close = True , precision = \"s\" ) -> \"Flux\" : \"\"\"\u6dfb\u52a0\u65f6\u95f4\u8303\u56f4\u8fc7\u6ee4 \u5fc5\u987b\u6307\u5b9a\u7684\u67e5\u8be2\u6761\u4ef6\uff0c\u5426\u5219influxdb\u4f1a\u62a5unbound\u67e5\u8be2\u9519\uff0c\u56e0\u4e3a\u8fd9\u79cd\u60c5\u51b5\u4e0b\uff0c\u8fd4\u56de\u7684\u6570\u636e\u91cf\u5c06\u975e\u5e38\u5927\u3002 \u5728\u683c\u5f0f\u5316\u65f6\u95f4\u65f6\uff0c\u9700\u8981\u6839\u636e`precision`\u751f\u6210\u65f6\u95f4\u5b57\u7b26\u4e32\u3002\u5728\u5411Influxdb\u53d1\u9001\u8bf7\u6c42\u65f6\uff0c\u5e94\u8be5\u6ce8\u610f\u67e5\u8be2\u53c2\u6570\u4e2d\u6307\u5b9a\u7684\u65f6\u95f4\u7cbe\u5ea6\u4e0e\u8fd9\u91cc\u4f7f\u7528\u7684\u4fdd\u6301\u4e00\u81f4\u3002 Influxdb\u7684\u67e5\u8be2\u7ed3\u679c\u9ed8\u8ba4\u4e0d\u5305\u542b\u7ed3\u675f\u65f6\u95f4\uff0c\u5f53`right_close`\u6307\u5b9a\u4e3aTrue\u65f6\uff0c\u6211\u4eec\u5c06\u6839\u636e\u6307\u5b9a\u7684\u7cbe\u5ea6\u4fee\u6539`end`\u65f6\u95f4\uff0c\u4f7f\u4e4b\u4ec5\u6bd4`end`\u591a\u4e00\u4e2a\u65f6\u95f4\u5355\u4f4d\uff0c\u4ece\u800c\u4fdd\u8bc1\u67e5\u8be2\u7ed3\u679c\u4f1a\u5305\u542b`end`\u3002 Raises: DuplicateOperationError: \u4e00\u4e2a\u67e5\u8be2\u4e2d\u53ea\u5141\u8bb8\u6307\u5b9a\u4e00\u6b21\u65f6\u95f4\u8303\u56f4\uff0c\u5982\u679crange\u8868\u8fbe\u5f0f\u5df2\u7ecf\u5b58\u5728\uff0c\u5219\u629b\u51fa\u5f02\u5e38 Args: start: \u5f00\u59cb\u65f6\u95f4 end: \u7ed3\u675f\u65f6\u95f4 right_close: \u67e5\u8be2\u7ed3\u679c\u662f\u5426\u5305\u542b\u7ed3\u675f\u65f6\u95f4\u3002 precision: \u65f6\u95f4\u7cbe\u5ea6\uff0c\u9ed8\u8ba4\u4e3a\u79d2\u3002 Returns: Flux\u5bf9\u8c61\uff0c\u4ee5\u652f\u6301\u7ba1\u9053\u64cd\u4f5c \"\"\" if \"range\" in self . expressions : raise DuplicateOperationError ( \"range has been set\" ) if precision not in [ \"s\" , \"ms\" , \"us\" ]: raise AssertionError ( \"precision must be 's', 'ms' or 'us'\" ) end = self . format_time ( end , precision , right_close ) start = self . format_time ( start , precision ) self . expressions [ \"range\" ] = f \" |> range(start: { start } , stop: { end } )\" return self def limit ( self , limit : int ) -> \"Flux\" : \"\"\"\u6dfb\u52a0\u8fd4\u56de\u8bb0\u5f55\u6570\u9650\u5236 Raises: DuplicateOperationError: \u4e00\u4e2a\u67e5\u8be2\u4e2d\u53ea\u5141\u8bb8\u6307\u5b9a\u4e00\u6b21limit\uff0c\u5982\u679climit\u8868\u8fbe\u5f0f\u5df2\u7ecf\u5b58\u5728\uff0c\u5219\u629b\u51fa\u5f02\u5e38 Args: limit: \u8fd4\u56de\u8bb0\u5f55\u6570\u9650\u5236 Returns: Flux\u5bf9\u8c61\uff0c\u4ee5\u4fbf\u8fdb\u884c\u7ba1\u9053\u64cd\u4f5c \"\"\" if \"limit\" in self . expressions : raise DuplicateOperationError ( \"limit has been set\" ) self . 
expressions [ \"limit\" ] = \" |> limit(n: %d )\" % limit return self @classmethod def to_timestamp ( cls , tm : Frame , precision : str = \"s\" ) -> int : \"\"\"\u5c06\u65f6\u95f4\u6839\u636e\u7cbe\u5ea6\u8f6c\u6362\u4e3aunix\u65f6\u95f4\u6233 \u5728\u5f80influxdb\u5199\u5165\u6570\u636e\u65f6\uff0cline-protocol\u8981\u6c42\u7684\u65f6\u95f4\u6233\u4e3aunix timestamp\uff0c\u5e76\u4e14\u4e0e\u5176\u7cbe\u5ea6\u5bf9\u5e94\u3002 influxdb\u59cb\u7ec8\u4f7f\u7528UTC\u65f6\u95f4\uff0c\u56e0\u6b64\uff0c`tm`\u4e5f\u5fc5\u987b\u5df2\u7ecf\u8f6c\u6362\u6210UTC\u65f6\u95f4\u3002 Args: tm: \u65f6\u95f4 precision: \u65f6\u95f4\u7cbe\u5ea6\uff0c\u9ed8\u8ba4\u4e3a\u79d2\u3002 Returns: \u65f6\u95f4\u6233 \"\"\" if precision not in [ \"s\" , \"ms\" , \"us\" ]: raise AssertionError ( \"precision must be 's', 'ms' or 'us'\" ) # get int repr of tm, in seconds unit if isinstance ( tm , np . datetime64 ): tm = tm . astype ( \"datetime64[s]\" ) . astype ( \"int\" ) elif isinstance ( tm , datetime . datetime ): tm = tm . timestamp () else : tm = arrow . get ( tm ) . timestamp () return int ( tm * 10 ** ({ \"s\" : 0 , \"ms\" : 3 , \"us\" : 6 }[ precision ])) @classmethod def format_time ( cls , tm : Frame , precision : str = \"s\" , shift_forward = False ) -> str : \"\"\"\u5c06\u65f6\u95f4\u8f6c\u6362\u6210\u5ba2\u6237\u7aef\u5bf9\u5e94\u7684\u7cbe\u5ea6\uff0c\u5e76\u4ee5 RFC3339 timestamps\u683c\u5f0f\u4e32\uff08\u5373influxdb\u8981\u6c42\u7684\u683c\u5f0f\uff09\u8fd4\u56de\u3002 \u5982\u679c\u8fd9\u4e2a\u65f6\u95f4\u662f\u4f5c\u4e3a\u67e5\u8be2\u7684range\u4e2d\u7684\u7ed3\u675f\u65f6\u95f4\u4f7f\u7528\u65f6\uff0c\u7531\u4e8einflux\u67e5\u8be2\u7684\u65f6\u95f4\u8303\u56f4\u662f\u5de6\u95ed\u53f3\u5f00\u7684\uff0c\u56e0\u6b64\u5982\u679c\u4f60\u9700\u8981\u67e5\u8be2\u7684\u662f\u4e00\u4e2a\u95ed\u533a\u95f4\uff0c\u5219\u9700\u8981\u5c06`end`\u7684\u65f6\u95f4\u5411\u524d\u504f\u79fb\u4e00\u4e2a\u7cbe\u5ea6\u3002\u901a\u8fc7\u4f20\u5165`shift_forward = True`\u53ef\u4ee5\u5b8c\u6210\u8fd9\u79cd\u8f6c\u6362\u3002 Examples: >>> # by default, the precision is seconds, and convert a date >>> Flux.format_time(datetime.date(2019, 1, 1)) '2019-01-01T00:00:00Z' >>> # set precision to ms, convert a time >>> Flux.format_time(datetime.datetime(1978, 7, 8, 12, 34, 56, 123456), precision=\"ms\") '1978-07-08T12:34:56.123Z' >>> # convert and forward shift >>> Flux.format_time(datetime.date(1978, 7, 8), shift_forward = True) '1978-07-08T00:00:01Z' Args: tm : \u5f85\u683c\u5f0f\u5316\u7684\u65f6\u95f4 precision: \u65f6\u95f4\u7cbe\u5ea6\uff0c\u53ef\u9009\u503c\u4e3a\uff1a's', 'ms', 'us' shift_forward: \u5982\u679c\u4e3aTrue\uff0c\u5219\u5c06end\u5411\u524d\u504f\u79fb\u4e00\u4e2a\u7cbe\u5ea6 Returns: \u8c03\u6574\u540e\u7b26\u5408influx\u65f6\u95f4\u89c4\u8303\u7684\u65f6\u95f4\uff08\u5b57\u7b26\u4e32\u8868\u793a\uff09 \"\"\" timespec = { \"s\" : \"seconds\" , \"ms\" : \"milliseconds\" , \"us\" : \"microseconds\" } . get ( precision ) if timespec is None : raise ValueError ( f \"precision must be one of 's', 'ms', 'us', but got { precision } \" ) tm = arrow . get ( tm ) . naive if shift_forward : tm = tm + datetime . timedelta ( ** { timespec : 1 }) return tm . 
isoformat ( sep = \"T\" , timespec = timespec ) + \"Z\" def tags ( self , tags : DefaultDict [ str , List [ str ]]) -> \"Flux\" : \"\"\"\u7ed9\u67e5\u8be2\u6dfb\u52a0tags\u8fc7\u6ee4\u6761\u4ef6 \u6b64\u67e5\u8be2\u6761\u4ef6\u4e3a\u8fc7\u6ee4\u6761\u4ef6\uff0c\u5e76\u975e\u5fc5\u987b\u3002\u5982\u679c\u67e5\u8be2\u4e2d\u6ca1\u6709\u6307\u5b9atags\uff0c\u5219\u4f1a\u8fd4\u56de\u6240\u6709\u8bb0\u5f55\u3002 \u5728\u5b9e\u73b0\u4e0a\uff0c\u65e2\u53ef\u4ee5\u4f7f\u7528`contains`\u8bed\u6cd5\uff0c\u4e5f\u53ef\u4ee5\u4f7f\u7528`or`\u8bed\u6cd5(\u7531\u4e8e\u4e00\u6761\u8bb0\u5f55\u53ea\u80fd\u5c5e\u4e8e\u4e00\u4e2atag\uff0c\u6240\u4ee5\uff0c\u5f53\u6307\u5b9a\u591a\u4e2atag\u8fdb\u884c\u67e5\u8be2\u65f6\uff0c\u5b83\u4eec\u4e4b\u95f4\u7684\u5173\u7cfb\u5e94\u8be5\u4e3a`or`)\u3002\u7ecf\u9a8c\u8bc1\uff0ccontains\u8bed\u6cd5\u4f1a\u59cb\u7ec8\u5148\u5c06\u6240\u6709\u7b26\u5408\u6761\u4ef6\u7684\u8bb0\u5f55\u68c0\u7d22\u51fa\u6765\uff0c\u518d\u8fdb\u884c\u8fc7\u6ee4\u3002\u8fd9\u6837\u7684\u6548\u7387\u6bd4\u8f83\u4f4e\uff0c\u7279\u522b\u662f\u5f53tags\u7684\u6570\u91cf\u8f83\u5c11\u65f6\uff0c\u4f1a\u8fdc\u8fdc\u6bd4\u4f7f\u7528or\u8bed\u6cd5\u6162\u3002 Raises: DuplicateOperationError: \u4e00\u4e2a\u67e5\u8be2\u4e2d\u53ea\u5141\u8bb8\u6267\u884c\u4e00\u6b21\uff0c\u5982\u679ctag filter\u8868\u8fbe\u5f0f\u5df2\u7ecf\u5b58\u5728\uff0c\u5219\u629b\u51fa\u5f02\u5e38 Args: tags : tags\u662f\u4e00\u4e2a{tagname: Union[str,[tag_values]]}\u5bf9\u8c61\u3002 Examples: >>> flux = Flux() >>> flux.tags({\"code\": [\"000001\", \"000002\"], \"name\": [\"\u6d66\u53d1\u94f6\u884c\"]}).expressions[\"tags\"] ' |> filter(fn: (r) => r[\"code\"] == \"000001\" or r[\"code\"] == \"000002\" or r[\"name\"] == \"\u6d66\u53d1\u94f6\u884c\")' Returns: Flux\u5bf9\u8c61\uff0c\u4ee5\u4fbf\u8fdb\u884c\u7ba1\u9053\u64cd\u4f5c \"\"\" if \"tags\" in self . expressions : raise DuplicateOperationError ( \"tags has been set\" ) filters = [] for tag , values in tags . items (): assert ( isinstance ( values , str ) or len ( values ) > 0 ), f \"tag { tag } should not be empty or None\" if isinstance ( values , str ): values = [ values ] for v in values : filters . append ( f 'r[\" { tag } \"] == \" { v } \"' ) op_expression = \" or \" . join ( filters ) self . expressions [ \"tags\" ] = f \" |> filter(fn: (r) => { op_expression } )\" return self def fields ( self , fields : List , reserve_time_stamp : bool = True ) -> \"Flux\" : \"\"\"\u7ed9\u67e5\u8be2\u6dfb\u52a0field\u8fc7\u6ee4\u6761\u4ef6 \u6b64\u67e5\u8be2\u6761\u4ef6\u4e3a\u8fc7\u6ee4\u6761\u4ef6\uff0c\u7528\u4ee5\u6307\u5b9a\u54ea\u4e9bfield\u4f1a\u51fa\u73b0\u5728\u67e5\u8be2\u7ed3\u679c\u4e2d\uff0c\u5e76\u975e\u5fc5\u987b\u3002\u5982\u679c\u67e5\u8be2\u4e2d\u6ca1\u6709\u6307\u5b9atags\uff0c\u5219\u4f1a\u8fd4\u56de\u6240\u6709\u8bb0\u5f55\u3002 \u7531\u4e8e\u4e00\u6761\u8bb0\u5f55\u53ea\u80fd\u5c5e\u4e8e\u4e00\u4e2a_field\uff0c\u6240\u4ee5\uff0c\u5f53\u6307\u5b9a\u591a\u4e2a_field\u8fdb\u884c\u67e5\u8be2\u65f6\uff0c\u5b83\u4eec\u4e4b\u95f4\u7684\u5173\u7cfb\u5e94\u8be5\u4e3a`or`\u3002 Raises: DuplicateOperationError: \u4e00\u4e2a\u67e5\u8be2\u4e2d\u53ea\u5141\u8bb8\u6267\u884c\u4e00\u6b21\uff0c\u5982\u679cfiled filter\u8868\u8fbe\u5f0f\u5df2\u7ecf\u5b58\u5728\uff0c\u5219\u629b\u51fa\u5f02\u5e38 Args: fields: \u5f85\u67e5\u8be2\u7684field\u5217\u8868 reserve_time_stamp: \u662f\u5426\u4fdd\u7559\u65f6\u95f4\u6233`_time`\uff0c\u9ed8\u8ba4\u4e3aTrue Returns: Flux\u5bf9\u8c61\uff0c\u4ee5\u4fbf\u8fdb\u884c\u7ba1\u9053\u64cd\u4f5c \"\"\" if \"fields\" in self . 
expressions : raise DuplicateOperationError ( \"fields has been set\" ) self . _cols = fields . copy () if reserve_time_stamp and \"_time\" not in self . _cols : self . _cols . append ( \"_time\" ) self . _cols = sorted ( self . _cols ) filters = [ f 'r[\"_field\"] == \" { name } \"' for name in self . _cols ] self . expressions [ \"fields\" ] = f \" |> filter(fn: (r) => { ' or ' . join ( filters ) } )\" return self def pivot ( self , row_keys : List [ str ] = [ \"_time\" ], column_keys = [ \"_field\" ], value_column : str = \"_value\" , ) -> \"Flux\" : \"\"\"pivot\u7528\u6765\u5c06\u4ee5\u5217\u4e3a\u5355\u4f4d\u7684\u6570\u636e\u8f6c\u6362\u4e3a\u4ee5\u884c\u4e3a\u5355\u4f4d\u7684\u6570\u636e Flux\u67e5\u8be2\u8fd4\u56de\u7684\u7ed3\u679c\u901a\u5e38\u90fd\u662f\u4ee5\u5217\u4e3a\u5355\u4f4d\u7684\u6570\u636e\uff0c\u589e\u52a0\u672cpivot\u6761\u4ef6\u540e\uff0c\u7ed3\u679c\u5c06\u88ab\u8f6c\u6362\u6210\u4e3a\u4ee5\u884c\u4e3a\u5355\u4f4d\u7684\u6570\u636e\u518d\u8fd4\u56de\u3002 \u8fd9\u91cc\u5b9e\u73b0\u7684\u662fmeasurement\u5185\u7684\u8f6c\u6362\uff0c\u8bf7\u53c2\u8003 [pivot](https://docs.influxdata.com/flux/v0.x/stdlib/universe/pivot/#align-fields-within-each-measurement-that-have-the-same-timestamp) Args: row_keys: \u60df\u4e00\u786e\u5b9a\u8f93\u51fa\u4e2d\u4e00\u884c\u6570\u636e\u7684\u5217\u540d\u5b57, \u9ed8\u8ba4\u4e3a[\"_time\"] column_keys: \u5217\u540d\u79f0\u5217\u8868\uff0c\u9ed8\u8ba4\u4e3a[\"_field\"] value_column: \u503c\u5217\u540d\uff0c\u9ed8\u8ba4\u4e3a\"_value\" Returns: Flux\u5bf9\u8c61\uff0c\u4ee5\u4fbf\u8fdb\u884c\u7ba1\u9053\u64cd\u4f5c \"\"\" if \"pivot\" in self . expressions : raise DuplicateOperationError ( \"pivot has been set\" ) columns = \",\" . join ([ f '\" { name } \"' for name in column_keys ]) rowkeys = \",\" . join ([ f '\" { name } \"' for name in row_keys ]) self . expressions [ \"pivot\" ] = f ' |> pivot(columnKey: [ { columns } ], rowKey: [ { rowkeys } ], valueColumn: \" { value_column } \")' return self def sort ( self , by : List [ str ] = None , desc : bool = False ) -> \"Flux\" : \"\"\"\u6309\u7167\u6307\u5b9a\u7684\u5217\u8fdb\u884c\u6392\u5e8f \u6839\u636e[influxdb doc](https://docs.influxdata.com/influxdb/v2.0/query-data/flux/first-last/), \u67e5\u8be2\u8fd4\u56de\u503c\u9ed8\u8ba4\u5730\u6309\u65f6\u95f4\u6392\u5e8f\u3002\u56e0\u6b64\uff0c\u5982\u679c\u4ec5\u4ec5\u662f\u8981\u6c42\u67e5\u8be2\u7ed3\u679c\u6309\u65f6\u95f4\u6392\u5e8f\uff0c\u65e0\u987b\u8c03\u7528\u6b64API\uff0c\u4f46\u662f\uff0c\u6b64API\u63d0\u4f9b\u4e86\u6309\u5176\u5b83\u5b57\u6bb5\u6392\u5e8f\u7684\u80fd\u529b\u3002 \u53e6\u5916\uff0c\u5728\u4e00\u4e2a\u67095000\u591a\u4e2atag\uff0c\u5171\u8fd4\u56de1M\u6761\u8bb0\u5f55\u7684\u6d4b\u8bd5\u4e2d\uff0c\u6d4b\u8bd5\u9a8c\u8bc1\u8fd4\u56de\u8bb0\u5f55\u786e\u5b9e\u6309_time\u5347\u5e8f\u6392\u5217\u3002 Args: by: \u6307\u5b9a\u6392\u5e8f\u7684\u5217\u540d\u79f0\u5217\u8868 Returns: Flux\u5bf9\u8c61\uff0c\u4ee5\u4fbf\u8fdb\u884c\u7ba1\u9053\u64cd\u4f5c \"\"\" if \"sort\" in self . expressions : raise DuplicateOperationError ( \"sort has been set\" ) if by is None : by = [ \"_value\" ] if isinstance ( by , str ): by = [ by ] columns_ = \",\" . join ([ f '\" { name } \"' for name in by ]) desc = \"true\" if desc else \"false\" self . expressions [ \"sort\" ] = f \" |> sort(columns: [ { columns_ } ], desc: { desc } )\" return self def group ( self , by : Tuple [ str ]) -> \"Flux\" : \"\"\"[summary] Returns: [description] \"\"\" if \"group\" in self . 
expressions : raise DuplicateOperationError ( \"group has been set\" ) if isinstance ( by , str ): by = [ by ] cols = \",\" . join ([ f '\" { col } \"' for col in by ]) self . expressions [ \"group\" ] = f \" |> group(columns: [ { cols } ])\" return self def latest ( self , n : int ) -> \"Flux\" : \"\"\"\u83b7\u53d6\u6700\u540en\u6761\u6570\u636e\uff0c\u6309\u65f6\u95f4\u589e\u5e8f\u8fd4\u56de Flux\u67e5\u8be2\u7684\u589e\u5f3a\u529f\u80fd\uff0c\u76f8\u5f53\u4e8etop + sort + limit Args: n: \u6700\u540en\u6761\u6570\u636e Returns: Flux\u5bf9\u8c61\uff0c\u4ee5\u4fbf\u8fdb\u884c\u7ba1\u9053\u64cd\u4f5c \"\"\" assert \"top\" not in self . expressions , \"top and last_n can not be used together\" assert ( \"sort\" not in self . expressions ), \"sort and last_n can not be used together\" assert ( \"limit\" not in self . expressions ), \"limit and last_n can not be used together\" self . _last_n = n return self @property def cols ( self ) -> List [ str ]: \"\"\"the columns or the return records the implementation is buggy. Influx doesn't tell us in which order these columns are. Returns: the columns name of the return records \"\"\" # fixme: if keep in expression, then return group key + tag key + value key # if keep not in expression, then stream, table, _time, ... return sorted ( self . _cols ) def delete ( self , measurement : str , stop : datetime . datetime , tags : dict = {}, start : datetime . datetime = None , precision : str = \"s\" , ) -> dict : \"\"\"\u6784\u5efa\u5220\u9664\u8bed\u53e5\u3002 according to [delete-predicate](https://docs.influxdata.com/influxdb/v2.1/reference/syntax/delete-predicate/), delete\u53ea\u652f\u6301AND\u903b\u8f91\u64cd\u4f5c\uff0c\u53ea\u652f\u6301\u201c=\u201d\u64cd\u4f5c\uff0c\u4e0d\u652f\u6301\u201c\uff01=\u201d\u64cd\u4f5c\uff0c\u53ef\u4ee5\u4f7f\u7528\u4efb\u4f55\u5b57\u6bb5\u6216\u8005tag\uff0c\u4f46\u4e0d\u5305\u62ec_time\u548c_value\u5b57\u6bb5\u3002 \u7531\u4e8einfluxdb\u8fd9\u4e00\u6bb5\u6587\u6863\u4e0d\u662f\u5f88\u6e05\u695a\uff0c\u6839\u636e\u8bd5\u9a8c\u7ed3\u679c\uff0c\u76ee\u524d\u4ec5\u652f\u6301\u6309\u65f6\u95f4\u8303\u56f4\u548ctags\u8fdb\u884c\u5220\u9664\u8f83\u597d\u3002\u5982\u679c\u67d0\u4e2acolumn\u7684\u503c\u7c7b\u578b\u662f\u5b57\u7b26\u4e32\uff0c\u5219\u4e5f\u53ef\u4ee5\u901a\u8fc7`tags`\u53c2\u6570\u4f20\u5165\uff0c\u5339\u914d\u540e\u5220\u9664\u3002\u4f46\u5982\u679c\u4f20\u5165\u4e86\u975e\u5b57\u7b26\u4e32\u7c7b\u578b\u7684column\uff0c\u5219\u5c06\u5f97\u5230\u65e0\u6cd5\u9884\u6599\u7684\u7ed3\u679c\u3002 Args: measurement : [description] stop : [description] tags : \u6309tags\u548c\u5339\u914d\u7684\u503c\u8fdb\u884c\u5220\u9664\u3002\u4f20\u5165\u7684tags\u4e2d\uff0ckey\u4e3atag\u540d\u79f0\uff0cvalue\u4e3atag\u8981\u5339\u914d\u7684\u53d6\u503c\uff0c\u53ef\u4ee5\u4e3astr\u6216\u8005List[str]\u3002 start : \u8d77\u59cb\u65f6\u95f4\u3002\u5982\u679c\u7701\u7565\uff0c\u5219\u4f7f\u7528EPOCH_START. precision : \u65f6\u95f4\u7cbe\u5ea6\u3002\u53ef\u4ee5\u4e3a\u201cs\u201d\uff0c\u201cms\u201d\uff0c\u201cus\u201d Returns: \u5220\u9664\u8bed\u53e5 \"\"\" timespec = { \"s\" : \"seconds\" , \"ms\" : \"milliseconds\" , \"us\" : \"microseconds\" } . get ( precision ) if start is None : start = self . EPOCH_START . isoformat ( timespec = timespec ) + \"Z\" predicate = [ f '_measurement=\" { measurement } \"' ] for key , value in tags . items (): if isinstance ( value , list ): predicate . extend ([ f ' { key } = \" { v } \"' for v in value ]) else : predicate . 
append ( f ' { key } = \" { value } \"' ) command = { \"start\" : start , \"stop\" : f \" { stop . isoformat ( timespec = timespec ) } Z\" , \"predicate\" : \" AND \" . join ( predicate ), } return command def drop ( self , cols : List [ str ]) -> \"Flux\" : \"\"\"use this to drop columns before return result Args: cols : the name of columns to be dropped Returns: Flux object, to support pipe operation \"\"\" if \"drop\" in self . expressions : raise DuplicateOperationError ( \"drop operation has been set already\" ) # add surrounding quotes _cols = [ f '\" { c } \"' for c in cols ] self . expressions [ \"drop\" ] = f \" |> drop(columns: [ { ',' . join ( _cols ) } ])\" return self def drop_sys_cols ( self , cols : List [ str ] = None ) -> \"Flux\" : \"\"\"use this to drop [\"_start\", \"_stop\", \"_measurement\"], plus columns specified in `cols`, before return query result please be noticed, after drop sys columns, there's still two sys columns left, which is \"_time\" and \"table\", and \"_time\" should usually be kept, \"table\" is one we're not able to removed. If you don't like _time in return result, you can specify it in `cols` parameter. Args: cols : the extra columns to be dropped Returns: Flux query object \"\"\" _cols = [ \"_start\" , \"_stop\" , \"_measurement\" ] if cols is not None : _cols . extend ( cols ) return self . drop ( _cols ) cols : List [ str ] property readonly \u00b6 the columns or the return records the implementation is buggy. Influx doesn't tell us in which order these columns are. Returns: Type Description List[str] the columns name of the return records __init__ ( self , auto_pivot = True , no_sys_cols = True ) special \u00b6 \u521d\u59cb\u5316Flux\u5bf9\u8c61 Parameters: Name Type Description Default auto_pivot \u662f\u5426\u81ea\u52a8\u5c06\u67e5\u8be2\u5217\u5b57\u6bb5\u7ec4\u88c5\u6210\u884c. Defaults to True. True no_sys_cols \u662f\u5426\u81ea\u52a8\u5c06\u7cfb\u7edf\u5b57\u6bb5\u5220\u9664. Defaults to True.\u8bf7\u53c2\u8003 drop_sys_cols True Source code in omicron/dal/influx/flux.py def __init__ ( self , auto_pivot = True , no_sys_cols = True ): \"\"\"\u521d\u59cb\u5316Flux\u5bf9\u8c61 Args: auto_pivot : \u662f\u5426\u81ea\u52a8\u5c06\u67e5\u8be2\u5217\u5b57\u6bb5\u7ec4\u88c5\u6210\u884c. Defaults to True. no_sys_cols: \u662f\u5426\u81ea\u52a8\u5c06\u7cfb\u7edf\u5b57\u6bb5\u5220\u9664. Defaults to True.\u8bf7\u53c2\u8003[drop_sys_cols][omicron.dal.influx.flux.Flux.drop_sys_cols] \"\"\" self . _cols = None self . expressions = defaultdict ( list ) self . _auto_pivot = auto_pivot self . _last_n = None self . no_sys_cols = no_sys_cols bucket ( self , bucket ) \u00b6 add bucket to query expression Exceptions: Type Description DuplicateOperationError \u4e00\u4e2a\u67e5\u8be2\u4e2d\u53ea\u5141\u8bb8\u6307\u5b9a\u4e00\u4e2asource\uff0c\u5982\u679c\u8868\u8fbe\u5f0f\u4e2d\u5df2\u7ecf\u6307\u5b9a\u4e86bucket\uff0c\u5219\u629b\u51fa\u5f02\u5e38 Returns: Type Description Flux Flux\u5bf9\u8c61 Source code in omicron/dal/influx/flux.py def bucket ( self , bucket : str ) -> \"Flux\" : \"\"\"add bucket to query expression Raises: DuplicateOperationError: \u4e00\u4e2a\u67e5\u8be2\u4e2d\u53ea\u5141\u8bb8\u6307\u5b9a\u4e00\u4e2asource\uff0c\u5982\u679c\u8868\u8fbe\u5f0f\u4e2d\u5df2\u7ecf\u6307\u5b9a\u4e86bucket\uff0c\u5219\u629b\u51fa\u5f02\u5e38 Returns: Flux\u5bf9\u8c61 \"\"\" if \"bucket\" in self . expressions : raise DuplicateOperationError ( \"bucket has been set\" ) self . 
expressions [ \"bucket\" ] = f 'from(bucket: \" { bucket } \")' return self delete ( self , measurement , stop , tags = {}, start = None , precision = 's' ) \u00b6 \u6784\u5efa\u5220\u9664\u8bed\u53e5\u3002 according to delete-predicate , delete\u53ea\u652f\u6301AND\u903b\u8f91\u64cd\u4f5c\uff0c\u53ea\u652f\u6301\u201c=\u201d\u64cd\u4f5c\uff0c\u4e0d\u652f\u6301\u201c\uff01=\u201d\u64cd\u4f5c\uff0c\u53ef\u4ee5\u4f7f\u7528\u4efb\u4f55\u5b57\u6bb5\u6216\u8005tag\uff0c\u4f46\u4e0d\u5305\u62ec_time\u548c_value\u5b57\u6bb5\u3002 \u7531\u4e8einfluxdb\u8fd9\u4e00\u6bb5\u6587\u6863\u4e0d\u662f\u5f88\u6e05\u695a\uff0c\u6839\u636e\u8bd5\u9a8c\u7ed3\u679c\uff0c\u76ee\u524d\u4ec5\u652f\u6301\u6309\u65f6\u95f4\u8303\u56f4\u548ctags\u8fdb\u884c\u5220\u9664\u8f83\u597d\u3002\u5982\u679c\u67d0\u4e2acolumn\u7684\u503c\u7c7b\u578b\u662f\u5b57\u7b26\u4e32\uff0c\u5219\u4e5f\u53ef\u4ee5\u901a\u8fc7 tags \u53c2\u6570\u4f20\u5165\uff0c\u5339\u914d\u540e\u5220\u9664\u3002\u4f46\u5982\u679c\u4f20\u5165\u4e86\u975e\u5b57\u7b26\u4e32\u7c7b\u578b\u7684column\uff0c\u5219\u5c06\u5f97\u5230\u65e0\u6cd5\u9884\u6599\u7684\u7ed3\u679c\u3002 Parameters: Name Type Description Default measurement [description] required stop [description] required tags \u6309tags\u548c\u5339\u914d\u7684\u503c\u8fdb\u884c\u5220\u9664\u3002\u4f20\u5165\u7684tags\u4e2d\uff0ckey\u4e3atag\u540d\u79f0\uff0cvalue\u4e3atag\u8981\u5339\u914d\u7684\u53d6\u503c\uff0c\u53ef\u4ee5\u4e3astr\u6216\u8005List[str]\u3002 {} start \u8d77\u59cb\u65f6\u95f4\u3002\u5982\u679c\u7701\u7565\uff0c\u5219\u4f7f\u7528EPOCH_START. None precision \u65f6\u95f4\u7cbe\u5ea6\u3002\u53ef\u4ee5\u4e3a\u201cs\u201d\uff0c\u201cms\u201d\uff0c\u201cus\u201d 's' Returns: Type Description dict \u5220\u9664\u8bed\u53e5 Source code in omicron/dal/influx/flux.py def delete ( self , measurement : str , stop : datetime . datetime , tags : dict = {}, start : datetime . datetime = None , precision : str = \"s\" , ) -> dict : \"\"\"\u6784\u5efa\u5220\u9664\u8bed\u53e5\u3002 according to [delete-predicate](https://docs.influxdata.com/influxdb/v2.1/reference/syntax/delete-predicate/), delete\u53ea\u652f\u6301AND\u903b\u8f91\u64cd\u4f5c\uff0c\u53ea\u652f\u6301\u201c=\u201d\u64cd\u4f5c\uff0c\u4e0d\u652f\u6301\u201c\uff01=\u201d\u64cd\u4f5c\uff0c\u53ef\u4ee5\u4f7f\u7528\u4efb\u4f55\u5b57\u6bb5\u6216\u8005tag\uff0c\u4f46\u4e0d\u5305\u62ec_time\u548c_value\u5b57\u6bb5\u3002 \u7531\u4e8einfluxdb\u8fd9\u4e00\u6bb5\u6587\u6863\u4e0d\u662f\u5f88\u6e05\u695a\uff0c\u6839\u636e\u8bd5\u9a8c\u7ed3\u679c\uff0c\u76ee\u524d\u4ec5\u652f\u6301\u6309\u65f6\u95f4\u8303\u56f4\u548ctags\u8fdb\u884c\u5220\u9664\u8f83\u597d\u3002\u5982\u679c\u67d0\u4e2acolumn\u7684\u503c\u7c7b\u578b\u662f\u5b57\u7b26\u4e32\uff0c\u5219\u4e5f\u53ef\u4ee5\u901a\u8fc7`tags`\u53c2\u6570\u4f20\u5165\uff0c\u5339\u914d\u540e\u5220\u9664\u3002\u4f46\u5982\u679c\u4f20\u5165\u4e86\u975e\u5b57\u7b26\u4e32\u7c7b\u578b\u7684column\uff0c\u5219\u5c06\u5f97\u5230\u65e0\u6cd5\u9884\u6599\u7684\u7ed3\u679c\u3002 Args: measurement : [description] stop : [description] tags : \u6309tags\u548c\u5339\u914d\u7684\u503c\u8fdb\u884c\u5220\u9664\u3002\u4f20\u5165\u7684tags\u4e2d\uff0ckey\u4e3atag\u540d\u79f0\uff0cvalue\u4e3atag\u8981\u5339\u914d\u7684\u53d6\u503c\uff0c\u53ef\u4ee5\u4e3astr\u6216\u8005List[str]\u3002 start : \u8d77\u59cb\u65f6\u95f4\u3002\u5982\u679c\u7701\u7565\uff0c\u5219\u4f7f\u7528EPOCH_START. 
precision : \u65f6\u95f4\u7cbe\u5ea6\u3002\u53ef\u4ee5\u4e3a\u201cs\u201d\uff0c\u201cms\u201d\uff0c\u201cus\u201d Returns: \u5220\u9664\u8bed\u53e5 \"\"\" timespec = { \"s\" : \"seconds\" , \"ms\" : \"milliseconds\" , \"us\" : \"microseconds\" } . get ( precision ) if start is None : start = self . EPOCH_START . isoformat ( timespec = timespec ) + \"Z\" predicate = [ f '_measurement=\" { measurement } \"' ] for key , value in tags . items (): if isinstance ( value , list ): predicate . extend ([ f ' { key } = \" { v } \"' for v in value ]) else : predicate . append ( f ' { key } = \" { value } \"' ) command = { \"start\" : start , \"stop\" : f \" { stop . isoformat ( timespec = timespec ) } Z\" , \"predicate\" : \" AND \" . join ( predicate ), } return command drop ( self , cols ) \u00b6 use this to drop columns before return result Parameters: Name Type Description Default cols the name of columns to be dropped required Returns: Type Description Flux Flux object, to support pipe operation Source code in omicron/dal/influx/flux.py def drop ( self , cols : List [ str ]) -> \"Flux\" : \"\"\"use this to drop columns before return result Args: cols : the name of columns to be dropped Returns: Flux object, to support pipe operation \"\"\" if \"drop\" in self . expressions : raise DuplicateOperationError ( \"drop operation has been set already\" ) # add surrounding quotes _cols = [ f '\" { c } \"' for c in cols ] self . expressions [ \"drop\" ] = f \" |> drop(columns: [ { ',' . join ( _cols ) } ])\" return self drop_sys_cols ( self , cols = None ) \u00b6 use this to drop [\"_start\", \"_stop\", \"_measurement\"], plus columns specified in cols , before return query result please be noticed, after drop sys columns, there's still two sys columns left, which is \"_time\" and \"table\", and \"_time\" should usually be kept, \"table\" is one we're not able to removed. If you don't like _time in return result, you can specify it in cols parameter. Parameters: Name Type Description Default cols the extra columns to be dropped None Returns: Type Description Flux Flux query object Source code in omicron/dal/influx/flux.py def drop_sys_cols ( self , cols : List [ str ] = None ) -> \"Flux\" : \"\"\"use this to drop [\"_start\", \"_stop\", \"_measurement\"], plus columns specified in `cols`, before return query result please be noticed, after drop sys columns, there's still two sys columns left, which is \"_time\" and \"table\", and \"_time\" should usually be kept, \"table\" is one we're not able to removed. If you don't like _time in return result, you can specify it in `cols` parameter. Args: cols : the extra columns to be dropped Returns: Flux query object \"\"\" _cols = [ \"_start\" , \"_stop\" , \"_measurement\" ] if cols is not None : _cols . extend ( cols ) return self . 
drop ( _cols ) fields ( self , fields , reserve_time_stamp = True ) \u00b6 \u7ed9\u67e5\u8be2\u6dfb\u52a0field\u8fc7\u6ee4\u6761\u4ef6 \u6b64\u67e5\u8be2\u6761\u4ef6\u4e3a\u8fc7\u6ee4\u6761\u4ef6\uff0c\u7528\u4ee5\u6307\u5b9a\u54ea\u4e9bfield\u4f1a\u51fa\u73b0\u5728\u67e5\u8be2\u7ed3\u679c\u4e2d\uff0c\u5e76\u975e\u5fc5\u987b\u3002\u5982\u679c\u67e5\u8be2\u4e2d\u6ca1\u6709\u6307\u5b9atags\uff0c\u5219\u4f1a\u8fd4\u56de\u6240\u6709\u8bb0\u5f55\u3002 \u7531\u4e8e\u4e00\u6761\u8bb0\u5f55\u53ea\u80fd\u5c5e\u4e8e\u4e00\u4e2a_field\uff0c\u6240\u4ee5\uff0c\u5f53\u6307\u5b9a\u591a\u4e2a_field\u8fdb\u884c\u67e5\u8be2\u65f6\uff0c\u5b83\u4eec\u4e4b\u95f4\u7684\u5173\u7cfb\u5e94\u8be5\u4e3a or \u3002 Exceptions: Type Description DuplicateOperationError \u4e00\u4e2a\u67e5\u8be2\u4e2d\u53ea\u5141\u8bb8\u6267\u884c\u4e00\u6b21\uff0c\u5982\u679cfiled filter\u8868\u8fbe\u5f0f\u5df2\u7ecf\u5b58\u5728\uff0c\u5219\u629b\u51fa\u5f02\u5e38 Parameters: Name Type Description Default fields List \u5f85\u67e5\u8be2\u7684field\u5217\u8868 required reserve_time_stamp bool \u662f\u5426\u4fdd\u7559\u65f6\u95f4\u6233 _time \uff0c\u9ed8\u8ba4\u4e3aTrue True Returns: Type Description Flux Flux\u5bf9\u8c61\uff0c\u4ee5\u4fbf\u8fdb\u884c\u7ba1\u9053\u64cd\u4f5c Source code in omicron/dal/influx/flux.py def fields ( self , fields : List , reserve_time_stamp : bool = True ) -> \"Flux\" : \"\"\"\u7ed9\u67e5\u8be2\u6dfb\u52a0field\u8fc7\u6ee4\u6761\u4ef6 \u6b64\u67e5\u8be2\u6761\u4ef6\u4e3a\u8fc7\u6ee4\u6761\u4ef6\uff0c\u7528\u4ee5\u6307\u5b9a\u54ea\u4e9bfield\u4f1a\u51fa\u73b0\u5728\u67e5\u8be2\u7ed3\u679c\u4e2d\uff0c\u5e76\u975e\u5fc5\u987b\u3002\u5982\u679c\u67e5\u8be2\u4e2d\u6ca1\u6709\u6307\u5b9atags\uff0c\u5219\u4f1a\u8fd4\u56de\u6240\u6709\u8bb0\u5f55\u3002 \u7531\u4e8e\u4e00\u6761\u8bb0\u5f55\u53ea\u80fd\u5c5e\u4e8e\u4e00\u4e2a_field\uff0c\u6240\u4ee5\uff0c\u5f53\u6307\u5b9a\u591a\u4e2a_field\u8fdb\u884c\u67e5\u8be2\u65f6\uff0c\u5b83\u4eec\u4e4b\u95f4\u7684\u5173\u7cfb\u5e94\u8be5\u4e3a`or`\u3002 Raises: DuplicateOperationError: \u4e00\u4e2a\u67e5\u8be2\u4e2d\u53ea\u5141\u8bb8\u6267\u884c\u4e00\u6b21\uff0c\u5982\u679cfiled filter\u8868\u8fbe\u5f0f\u5df2\u7ecf\u5b58\u5728\uff0c\u5219\u629b\u51fa\u5f02\u5e38 Args: fields: \u5f85\u67e5\u8be2\u7684field\u5217\u8868 reserve_time_stamp: \u662f\u5426\u4fdd\u7559\u65f6\u95f4\u6233`_time`\uff0c\u9ed8\u8ba4\u4e3aTrue Returns: Flux\u5bf9\u8c61\uff0c\u4ee5\u4fbf\u8fdb\u884c\u7ba1\u9053\u64cd\u4f5c \"\"\" if \"fields\" in self . expressions : raise DuplicateOperationError ( \"fields has been set\" ) self . _cols = fields . copy () if reserve_time_stamp and \"_time\" not in self . _cols : self . _cols . append ( \"_time\" ) self . _cols = sorted ( self . _cols ) filters = [ f 'r[\"_field\"] == \" { name } \"' for name in self . _cols ] self . expressions [ \"fields\" ] = f \" |> filter(fn: (r) => { ' or ' . 
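A short sketch of `drop_sys_cols()` based on the source above. Note that when the query is constructed with the default `no_sys_cols=True`, `_compose()` adds this clause automatically; calling it by hand is only needed to drop extra columns such as `_time`.

```python
from omicron.dal.influx.flux import Flux

flux = Flux()
# Drop the system columns plus "_time", leaving only the pivoted data columns.
flux.drop_sys_cols(["_time"])

# The generated clause quotes every column name:
#  |> drop(columns: ["_start","_stop","_measurement","_time"])
print(flux.expressions["drop"])
```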
join ( filters ) } )\" return self format_time ( tm , precision = 's' , shift_forward = False ) classmethod \u00b6 \u5c06\u65f6\u95f4\u8f6c\u6362\u6210\u5ba2\u6237\u7aef\u5bf9\u5e94\u7684\u7cbe\u5ea6\uff0c\u5e76\u4ee5 RFC3339 timestamps\u683c\u5f0f\u4e32\uff08\u5373influxdb\u8981\u6c42\u7684\u683c\u5f0f\uff09\u8fd4\u56de\u3002 \u5982\u679c\u8fd9\u4e2a\u65f6\u95f4\u662f\u4f5c\u4e3a\u67e5\u8be2\u7684range\u4e2d\u7684\u7ed3\u675f\u65f6\u95f4\u4f7f\u7528\u65f6\uff0c\u7531\u4e8einflux\u67e5\u8be2\u7684\u65f6\u95f4\u8303\u56f4\u662f\u5de6\u95ed\u53f3\u5f00\u7684\uff0c\u56e0\u6b64\u5982\u679c\u4f60\u9700\u8981\u67e5\u8be2\u7684\u662f\u4e00\u4e2a\u95ed\u533a\u95f4\uff0c\u5219\u9700\u8981\u5c06 end \u7684\u65f6\u95f4\u5411\u524d\u504f\u79fb\u4e00\u4e2a\u7cbe\u5ea6\u3002\u901a\u8fc7\u4f20\u5165 shift_forward = True \u53ef\u4ee5\u5b8c\u6210\u8fd9\u79cd\u8f6c\u6362\u3002 Examples: >>> # by default, the precision is seconds, and convert a date >>> Flux . format_time ( datetime . date ( 2019 , 1 , 1 )) '2019-01-01T00:00:00Z' >>> # set precision to ms, convert a time >>> Flux . format_time ( datetime . datetime ( 1978 , 7 , 8 , 12 , 34 , 56 , 123456 ), precision = \"ms\" ) '1978-07-08T12:34:56.123Z' >>> # convert and forward shift >>> Flux . format_time ( datetime . date ( 1978 , 7 , 8 ), shift_forward = True ) '1978-07-08T00:00:01Z' Parameters: Name Type Description Default tm \u5f85\u683c\u5f0f\u5316\u7684\u65f6\u95f4 required precision str \u65f6\u95f4\u7cbe\u5ea6\uff0c\u53ef\u9009\u503c\u4e3a\uff1a's', 'ms', 'us' 's' shift_forward \u5982\u679c\u4e3aTrue\uff0c\u5219\u5c06end\u5411\u524d\u504f\u79fb\u4e00\u4e2a\u7cbe\u5ea6 False Returns: Type Description str \u8c03\u6574\u540e\u7b26\u5408influx\u65f6\u95f4\u89c4\u8303\u7684\u65f6\u95f4\uff08\u5b57\u7b26\u4e32\u8868\u793a\uff09 Source code in omicron/dal/influx/flux.py @classmethod def format_time ( cls , tm : Frame , precision : str = \"s\" , shift_forward = False ) -> str : \"\"\"\u5c06\u65f6\u95f4\u8f6c\u6362\u6210\u5ba2\u6237\u7aef\u5bf9\u5e94\u7684\u7cbe\u5ea6\uff0c\u5e76\u4ee5 RFC3339 timestamps\u683c\u5f0f\u4e32\uff08\u5373influxdb\u8981\u6c42\u7684\u683c\u5f0f\uff09\u8fd4\u56de\u3002 \u5982\u679c\u8fd9\u4e2a\u65f6\u95f4\u662f\u4f5c\u4e3a\u67e5\u8be2\u7684range\u4e2d\u7684\u7ed3\u675f\u65f6\u95f4\u4f7f\u7528\u65f6\uff0c\u7531\u4e8einflux\u67e5\u8be2\u7684\u65f6\u95f4\u8303\u56f4\u662f\u5de6\u95ed\u53f3\u5f00\u7684\uff0c\u56e0\u6b64\u5982\u679c\u4f60\u9700\u8981\u67e5\u8be2\u7684\u662f\u4e00\u4e2a\u95ed\u533a\u95f4\uff0c\u5219\u9700\u8981\u5c06`end`\u7684\u65f6\u95f4\u5411\u524d\u504f\u79fb\u4e00\u4e2a\u7cbe\u5ea6\u3002\u901a\u8fc7\u4f20\u5165`shift_forward = True`\u53ef\u4ee5\u5b8c\u6210\u8fd9\u79cd\u8f6c\u6362\u3002 Examples: >>> # by default, the precision is seconds, and convert a date >>> Flux.format_time(datetime.date(2019, 1, 1)) '2019-01-01T00:00:00Z' >>> # set precision to ms, convert a time >>> Flux.format_time(datetime.datetime(1978, 7, 8, 12, 34, 56, 123456), precision=\"ms\") '1978-07-08T12:34:56.123Z' >>> # convert and forward shift >>> Flux.format_time(datetime.date(1978, 7, 8), shift_forward = True) '1978-07-08T00:00:01Z' Args: tm : \u5f85\u683c\u5f0f\u5316\u7684\u65f6\u95f4 precision: \u65f6\u95f4\u7cbe\u5ea6\uff0c\u53ef\u9009\u503c\u4e3a\uff1a's', 'ms', 'us' shift_forward: \u5982\u679c\u4e3aTrue\uff0c\u5219\u5c06end\u5411\u524d\u504f\u79fb\u4e00\u4e2a\u7cbe\u5ea6 Returns: \u8c03\u6574\u540e\u7b26\u5408influx\u65f6\u95f4\u89c4\u8303\u7684\u65f6\u95f4\uff08\u5b57\u7b26\u4e32\u8868\u793a\uff09 \"\"\" timespec = { \"s\" : \"seconds\" 
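A runnable sketch of the `fields()` filter described above. Because each record belongs to exactly one `_field`, the generated filter joins the field names with `or`; `_time` is appended automatically unless `reserve_time_stamp=False`.

```python
from omicron.dal.influx.flux import Flux

flux = Flux()
# Keep only the "open" and "close" fields; "_time" is kept by default.
flux.fields(["open", "close"])

# Field names are sorted before the filter is built:
#  |> filter(fn: (r) => r["_field"] == "_time" or r["_field"] == "close" or r["_field"] == "open")
print(flux.expressions["fields"])
```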
, \"ms\" : \"milliseconds\" , \"us\" : \"microseconds\" } . get ( precision ) if timespec is None : raise ValueError ( f \"precision must be one of 's', 'ms', 'us', but got { precision } \" ) tm = arrow . get ( tm ) . naive if shift_forward : tm = tm + datetime . timedelta ( ** { timespec : 1 }) return tm . isoformat ( sep = \"T\" , timespec = timespec ) + \"Z\" group ( self , by ) \u00b6 [summary] Returns: Type Description Flux [description] Source code in omicron/dal/influx/flux.py def group ( self , by : Tuple [ str ]) -> \"Flux\" : \"\"\"[summary] Returns: [description] \"\"\" if \"group\" in self . expressions : raise DuplicateOperationError ( \"group has been set\" ) if isinstance ( by , str ): by = [ by ] cols = \",\" . join ([ f '\" { col } \"' for col in by ]) self . expressions [ \"group\" ] = f \" |> group(columns: [ { cols } ])\" return self latest ( self , n ) \u00b6 \u83b7\u53d6\u6700\u540en\u6761\u6570\u636e\uff0c\u6309\u65f6\u95f4\u589e\u5e8f\u8fd4\u56de Flux\u67e5\u8be2\u7684\u589e\u5f3a\u529f\u80fd\uff0c\u76f8\u5f53\u4e8etop + sort + limit Parameters: Name Type Description Default n int \u6700\u540en\u6761\u6570\u636e required Returns: Type Description Flux Flux\u5bf9\u8c61\uff0c\u4ee5\u4fbf\u8fdb\u884c\u7ba1\u9053\u64cd\u4f5c Source code in omicron/dal/influx/flux.py def latest ( self , n : int ) -> \"Flux\" : \"\"\"\u83b7\u53d6\u6700\u540en\u6761\u6570\u636e\uff0c\u6309\u65f6\u95f4\u589e\u5e8f\u8fd4\u56de Flux\u67e5\u8be2\u7684\u589e\u5f3a\u529f\u80fd\uff0c\u76f8\u5f53\u4e8etop + sort + limit Args: n: \u6700\u540en\u6761\u6570\u636e Returns: Flux\u5bf9\u8c61\uff0c\u4ee5\u4fbf\u8fdb\u884c\u7ba1\u9053\u64cd\u4f5c \"\"\" assert \"top\" not in self . expressions , \"top and last_n can not be used together\" assert ( \"sort\" not in self . expressions ), \"sort and last_n can not be used together\" assert ( \"limit\" not in self . expressions ), \"limit and last_n can not be used together\" self . _last_n = n return self limit ( self , limit ) \u00b6 \u6dfb\u52a0\u8fd4\u56de\u8bb0\u5f55\u6570\u9650\u5236 Exceptions: Type Description DuplicateOperationError \u4e00\u4e2a\u67e5\u8be2\u4e2d\u53ea\u5141\u8bb8\u6307\u5b9a\u4e00\u6b21limit\uff0c\u5982\u679climit\u8868\u8fbe\u5f0f\u5df2\u7ecf\u5b58\u5728\uff0c\u5219\u629b\u51fa\u5f02\u5e38 Parameters: Name Type Description Default limit int \u8fd4\u56de\u8bb0\u5f55\u6570\u9650\u5236 required Returns: Type Description Flux Flux\u5bf9\u8c61\uff0c\u4ee5\u4fbf\u8fdb\u884c\u7ba1\u9053\u64cd\u4f5c Source code in omicron/dal/influx/flux.py def limit ( self , limit : int ) -> \"Flux\" : \"\"\"\u6dfb\u52a0\u8fd4\u56de\u8bb0\u5f55\u6570\u9650\u5236 Raises: DuplicateOperationError: \u4e00\u4e2a\u67e5\u8be2\u4e2d\u53ea\u5141\u8bb8\u6307\u5b9a\u4e00\u6b21limit\uff0c\u5982\u679climit\u8868\u8fbe\u5f0f\u5df2\u7ecf\u5b58\u5728\uff0c\u5219\u629b\u51fa\u5f02\u5e38 Args: limit: \u8fd4\u56de\u8bb0\u5f55\u6570\u9650\u5236 Returns: Flux\u5bf9\u8c61\uff0c\u4ee5\u4fbf\u8fdb\u884c\u7ba1\u9053\u64cd\u4f5c \"\"\" if \"limit\" in self . expressions : raise DuplicateOperationError ( \"limit has been set\" ) self . 
expressions [ \"limit\" ] = \" |> limit(n: %d )\" % limit return self measurement ( self , measurement ) \u00b6 add measurement filter to query Exceptions: Type Description DuplicateOperationError \u4e00\u6b21\u67e5\u8be2\u4e2d\u53ea\u5141\u8bb8\u6307\u5b9a\u4e00\u4e2ameasurement, \u5982\u679c\u8868\u8fbe\u5f0f\u4e2d\u5df2\u7ecf\u5b58\u5728measurement, \u5219\u629b\u51fa\u5f02\u5e38 Returns: Type Description Flux Flux\u5bf9\u8c61\u81ea\u8eab\uff0c\u4ee5\u4fbf\u8fdb\u884c\u7ba1\u9053\u64cd\u4f5c Source code in omicron/dal/influx/flux.py def measurement ( self , measurement : str ) -> \"Flux\" : \"\"\"add measurement filter to query Raises: DuplicateOperationError: \u4e00\u6b21\u67e5\u8be2\u4e2d\u53ea\u5141\u8bb8\u6307\u5b9a\u4e00\u4e2ameasurement, \u5982\u679c\u8868\u8fbe\u5f0f\u4e2d\u5df2\u7ecf\u5b58\u5728measurement, \u5219\u629b\u51fa\u5f02\u5e38 Returns: Flux\u5bf9\u8c61\u81ea\u8eab\uff0c\u4ee5\u4fbf\u8fdb\u884c\u7ba1\u9053\u64cd\u4f5c \"\"\" if \"measurement\" in self . expressions : raise DuplicateOperationError ( \"measurement has been set\" ) self . expressions [ \"measurement\" ] = f ' |> filter(fn: (r) => r[\"_measurement\"] == \" { measurement } \")' return self pivot ( self , row_keys = [ '_time' ], column_keys = [ '_field' ], value_column = '_value' ) \u00b6 pivot\u7528\u6765\u5c06\u4ee5\u5217\u4e3a\u5355\u4f4d\u7684\u6570\u636e\u8f6c\u6362\u4e3a\u4ee5\u884c\u4e3a\u5355\u4f4d\u7684\u6570\u636e Flux\u67e5\u8be2\u8fd4\u56de\u7684\u7ed3\u679c\u901a\u5e38\u90fd\u662f\u4ee5\u5217\u4e3a\u5355\u4f4d\u7684\u6570\u636e\uff0c\u589e\u52a0\u672cpivot\u6761\u4ef6\u540e\uff0c\u7ed3\u679c\u5c06\u88ab\u8f6c\u6362\u6210\u4e3a\u4ee5\u884c\u4e3a\u5355\u4f4d\u7684\u6570\u636e\u518d\u8fd4\u56de\u3002 \u8fd9\u91cc\u5b9e\u73b0\u7684\u662fmeasurement\u5185\u7684\u8f6c\u6362\uff0c\u8bf7\u53c2\u8003 pivot Parameters: Name Type Description Default row_keys List[str] \u60df\u4e00\u786e\u5b9a\u8f93\u51fa\u4e2d\u4e00\u884c\u6570\u636e\u7684\u5217\u540d\u5b57, \u9ed8\u8ba4\u4e3a[\"_time\"] ['_time'] column_keys \u5217\u540d\u79f0\u5217\u8868\uff0c\u9ed8\u8ba4\u4e3a[\"_field\"] ['_field'] value_column str \u503c\u5217\u540d\uff0c\u9ed8\u8ba4\u4e3a\"_value\" '_value' Returns: Type Description Flux Flux\u5bf9\u8c61\uff0c\u4ee5\u4fbf\u8fdb\u884c\u7ba1\u9053\u64cd\u4f5c Source code in omicron/dal/influx/flux.py def pivot ( self , row_keys : List [ str ] = [ \"_time\" ], column_keys = [ \"_field\" ], value_column : str = \"_value\" , ) -> \"Flux\" : \"\"\"pivot\u7528\u6765\u5c06\u4ee5\u5217\u4e3a\u5355\u4f4d\u7684\u6570\u636e\u8f6c\u6362\u4e3a\u4ee5\u884c\u4e3a\u5355\u4f4d\u7684\u6570\u636e Flux\u67e5\u8be2\u8fd4\u56de\u7684\u7ed3\u679c\u901a\u5e38\u90fd\u662f\u4ee5\u5217\u4e3a\u5355\u4f4d\u7684\u6570\u636e\uff0c\u589e\u52a0\u672cpivot\u6761\u4ef6\u540e\uff0c\u7ed3\u679c\u5c06\u88ab\u8f6c\u6362\u6210\u4e3a\u4ee5\u884c\u4e3a\u5355\u4f4d\u7684\u6570\u636e\u518d\u8fd4\u56de\u3002 \u8fd9\u91cc\u5b9e\u73b0\u7684\u662fmeasurement\u5185\u7684\u8f6c\u6362\uff0c\u8bf7\u53c2\u8003 [pivot](https://docs.influxdata.com/flux/v0.x/stdlib/universe/pivot/#align-fields-within-each-measurement-that-have-the-same-timestamp) Args: row_keys: \u60df\u4e00\u786e\u5b9a\u8f93\u51fa\u4e2d\u4e00\u884c\u6570\u636e\u7684\u5217\u540d\u5b57, \u9ed8\u8ba4\u4e3a[\"_time\"] column_keys: \u5217\u540d\u79f0\u5217\u8868\uff0c\u9ed8\u8ba4\u4e3a[\"_field\"] value_column: \u503c\u5217\u540d\uff0c\u9ed8\u8ba4\u4e3a\"_value\" Returns: Flux\u5bf9\u8c61\uff0c\u4ee5\u4fbf\u8fdb\u884c\u7ba1\u9053\u64cd\u4f5c \"\"\" if \"pivot\" in self . 
expressions : raise DuplicateOperationError ( \"pivot has been set\" ) columns = \",\" . join ([ f '\" { name } \"' for name in column_keys ]) rowkeys = \",\" . join ([ f '\" { name } \"' for name in row_keys ]) self . expressions [ \"pivot\" ] = f ' |> pivot(columnKey: [ { columns } ], rowKey: [ { rowkeys } ], valueColumn: \" { value_column } \")' return self range ( self , start , end , right_close = True , precision = 's' ) \u00b6 \u6dfb\u52a0\u65f6\u95f4\u8303\u56f4\u8fc7\u6ee4 \u5fc5\u987b\u6307\u5b9a\u7684\u67e5\u8be2\u6761\u4ef6\uff0c\u5426\u5219influxdb\u4f1a\u62a5unbound\u67e5\u8be2\u9519\uff0c\u56e0\u4e3a\u8fd9\u79cd\u60c5\u51b5\u4e0b\uff0c\u8fd4\u56de\u7684\u6570\u636e\u91cf\u5c06\u975e\u5e38\u5927\u3002 \u5728\u683c\u5f0f\u5316\u65f6\u95f4\u65f6\uff0c\u9700\u8981\u6839\u636e precision \u751f\u6210\u65f6\u95f4\u5b57\u7b26\u4e32\u3002\u5728\u5411Influxdb\u53d1\u9001\u8bf7\u6c42\u65f6\uff0c\u5e94\u8be5\u6ce8\u610f\u67e5\u8be2\u53c2\u6570\u4e2d\u6307\u5b9a\u7684\u65f6\u95f4\u7cbe\u5ea6\u4e0e\u8fd9\u91cc\u4f7f\u7528\u7684\u4fdd\u6301\u4e00\u81f4\u3002 Influxdb\u7684\u67e5\u8be2\u7ed3\u679c\u9ed8\u8ba4\u4e0d\u5305\u542b\u7ed3\u675f\u65f6\u95f4\uff0c\u5f53 right_close \u6307\u5b9a\u4e3aTrue\u65f6\uff0c\u6211\u4eec\u5c06\u6839\u636e\u6307\u5b9a\u7684\u7cbe\u5ea6\u4fee\u6539 end \u65f6\u95f4\uff0c\u4f7f\u4e4b\u4ec5\u6bd4 end \u591a\u4e00\u4e2a\u65f6\u95f4\u5355\u4f4d\uff0c\u4ece\u800c\u4fdd\u8bc1\u67e5\u8be2\u7ed3\u679c\u4f1a\u5305\u542b end \u3002 Exceptions: Type Description DuplicateOperationError \u4e00\u4e2a\u67e5\u8be2\u4e2d\u53ea\u5141\u8bb8\u6307\u5b9a\u4e00\u6b21\u65f6\u95f4\u8303\u56f4\uff0c\u5982\u679crange\u8868\u8fbe\u5f0f\u5df2\u7ecf\u5b58\u5728\uff0c\u5219\u629b\u51fa\u5f02\u5e38 Parameters: Name Type Description Default start Union[datetime.date, datetime.datetime] \u5f00\u59cb\u65f6\u95f4 required end Union[datetime.date, datetime.datetime] \u7ed3\u675f\u65f6\u95f4 required right_close \u67e5\u8be2\u7ed3\u679c\u662f\u5426\u5305\u542b\u7ed3\u675f\u65f6\u95f4\u3002 True precision \u65f6\u95f4\u7cbe\u5ea6\uff0c\u9ed8\u8ba4\u4e3a\u79d2\u3002 's' Returns: Type Description Flux Flux\u5bf9\u8c61\uff0c\u4ee5\u652f\u6301\u7ba1\u9053\u64cd\u4f5c Source code in omicron/dal/influx/flux.py def range ( self , start : Frame , end : Frame , right_close = True , precision = \"s\" ) -> \"Flux\" : \"\"\"\u6dfb\u52a0\u65f6\u95f4\u8303\u56f4\u8fc7\u6ee4 \u5fc5\u987b\u6307\u5b9a\u7684\u67e5\u8be2\u6761\u4ef6\uff0c\u5426\u5219influxdb\u4f1a\u62a5unbound\u67e5\u8be2\u9519\uff0c\u56e0\u4e3a\u8fd9\u79cd\u60c5\u51b5\u4e0b\uff0c\u8fd4\u56de\u7684\u6570\u636e\u91cf\u5c06\u975e\u5e38\u5927\u3002 \u5728\u683c\u5f0f\u5316\u65f6\u95f4\u65f6\uff0c\u9700\u8981\u6839\u636e`precision`\u751f\u6210\u65f6\u95f4\u5b57\u7b26\u4e32\u3002\u5728\u5411Influxdb\u53d1\u9001\u8bf7\u6c42\u65f6\uff0c\u5e94\u8be5\u6ce8\u610f\u67e5\u8be2\u53c2\u6570\u4e2d\u6307\u5b9a\u7684\u65f6\u95f4\u7cbe\u5ea6\u4e0e\u8fd9\u91cc\u4f7f\u7528\u7684\u4fdd\u6301\u4e00\u81f4\u3002 Influxdb\u7684\u67e5\u8be2\u7ed3\u679c\u9ed8\u8ba4\u4e0d\u5305\u542b\u7ed3\u675f\u65f6\u95f4\uff0c\u5f53`right_close`\u6307\u5b9a\u4e3aTrue\u65f6\uff0c\u6211\u4eec\u5c06\u6839\u636e\u6307\u5b9a\u7684\u7cbe\u5ea6\u4fee\u6539`end`\u65f6\u95f4\uff0c\u4f7f\u4e4b\u4ec5\u6bd4`end`\u591a\u4e00\u4e2a\u65f6\u95f4\u5355\u4f4d\uff0c\u4ece\u800c\u4fdd\u8bc1\u67e5\u8be2\u7ed3\u679c\u4f1a\u5305\u542b`end`\u3002 Raises: DuplicateOperationError: 
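A sketch of `range()` showing the effect of the default `right_close=True`: the stop time is shifted forward by one unit of `precision`, so records stamped exactly at `end` are included in the (left-closed, right-open) Influx range.

```python
import datetime
from omicron.dal.influx.flux import Flux

flux = Flux()
flux.range(datetime.date(2022, 1, 1), datetime.date(2022, 2, 1))

# With right_close=True and precision="s", the stop time gains one second:
#  |> range(start: 2022-01-01T00:00:00Z, stop: 2022-02-01T00:00:01Z)
print(flux.expressions["range"])
```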
\u4e00\u4e2a\u67e5\u8be2\u4e2d\u53ea\u5141\u8bb8\u6307\u5b9a\u4e00\u6b21\u65f6\u95f4\u8303\u56f4\uff0c\u5982\u679crange\u8868\u8fbe\u5f0f\u5df2\u7ecf\u5b58\u5728\uff0c\u5219\u629b\u51fa\u5f02\u5e38 Args: start: \u5f00\u59cb\u65f6\u95f4 end: \u7ed3\u675f\u65f6\u95f4 right_close: \u67e5\u8be2\u7ed3\u679c\u662f\u5426\u5305\u542b\u7ed3\u675f\u65f6\u95f4\u3002 precision: \u65f6\u95f4\u7cbe\u5ea6\uff0c\u9ed8\u8ba4\u4e3a\u79d2\u3002 Returns: Flux\u5bf9\u8c61\uff0c\u4ee5\u652f\u6301\u7ba1\u9053\u64cd\u4f5c \"\"\" if \"range\" in self . expressions : raise DuplicateOperationError ( \"range has been set\" ) if precision not in [ \"s\" , \"ms\" , \"us\" ]: raise AssertionError ( \"precision must be 's', 'ms' or 'us'\" ) end = self . format_time ( end , precision , right_close ) start = self . format_time ( start , precision ) self . expressions [ \"range\" ] = f \" |> range(start: { start } , stop: { end } )\" return self sort ( self , by = None , desc = False ) \u00b6 \u6309\u7167\u6307\u5b9a\u7684\u5217\u8fdb\u884c\u6392\u5e8f \u6839\u636e influxdb doc , \u67e5\u8be2\u8fd4\u56de\u503c\u9ed8\u8ba4\u5730\u6309\u65f6\u95f4\u6392\u5e8f\u3002\u56e0\u6b64\uff0c\u5982\u679c\u4ec5\u4ec5\u662f\u8981\u6c42\u67e5\u8be2\u7ed3\u679c\u6309\u65f6\u95f4\u6392\u5e8f\uff0c\u65e0\u987b\u8c03\u7528\u6b64API\uff0c\u4f46\u662f\uff0c\u6b64API\u63d0\u4f9b\u4e86\u6309\u5176\u5b83\u5b57\u6bb5\u6392\u5e8f\u7684\u80fd\u529b\u3002 \u53e6\u5916\uff0c\u5728\u4e00\u4e2a\u67095000\u591a\u4e2atag\uff0c\u5171\u8fd4\u56de1M\u6761\u8bb0\u5f55\u7684\u6d4b\u8bd5\u4e2d\uff0c\u6d4b\u8bd5\u9a8c\u8bc1\u8fd4\u56de\u8bb0\u5f55\u786e\u5b9e\u6309_time\u5347\u5e8f\u6392\u5217\u3002 Parameters: Name Type Description Default by List[str] \u6307\u5b9a\u6392\u5e8f\u7684\u5217\u540d\u79f0\u5217\u8868 None Returns: Type Description Flux Flux\u5bf9\u8c61\uff0c\u4ee5\u4fbf\u8fdb\u884c\u7ba1\u9053\u64cd\u4f5c Source code in omicron/dal/influx/flux.py def sort ( self , by : List [ str ] = None , desc : bool = False ) -> \"Flux\" : \"\"\"\u6309\u7167\u6307\u5b9a\u7684\u5217\u8fdb\u884c\u6392\u5e8f \u6839\u636e[influxdb doc](https://docs.influxdata.com/influxdb/v2.0/query-data/flux/first-last/), \u67e5\u8be2\u8fd4\u56de\u503c\u9ed8\u8ba4\u5730\u6309\u65f6\u95f4\u6392\u5e8f\u3002\u56e0\u6b64\uff0c\u5982\u679c\u4ec5\u4ec5\u662f\u8981\u6c42\u67e5\u8be2\u7ed3\u679c\u6309\u65f6\u95f4\u6392\u5e8f\uff0c\u65e0\u987b\u8c03\u7528\u6b64API\uff0c\u4f46\u662f\uff0c\u6b64API\u63d0\u4f9b\u4e86\u6309\u5176\u5b83\u5b57\u6bb5\u6392\u5e8f\u7684\u80fd\u529b\u3002 \u53e6\u5916\uff0c\u5728\u4e00\u4e2a\u67095000\u591a\u4e2atag\uff0c\u5171\u8fd4\u56de1M\u6761\u8bb0\u5f55\u7684\u6d4b\u8bd5\u4e2d\uff0c\u6d4b\u8bd5\u9a8c\u8bc1\u8fd4\u56de\u8bb0\u5f55\u786e\u5b9e\u6309_time\u5347\u5e8f\u6392\u5217\u3002 Args: by: \u6307\u5b9a\u6392\u5e8f\u7684\u5217\u540d\u79f0\u5217\u8868 Returns: Flux\u5bf9\u8c61\uff0c\u4ee5\u4fbf\u8fdb\u884c\u7ba1\u9053\u64cd\u4f5c \"\"\" if \"sort\" in self . expressions : raise DuplicateOperationError ( \"sort has been set\" ) if by is None : by = [ \"_value\" ] if isinstance ( by , str ): by = [ by ] columns_ = \",\" . join ([ f '\" { name } \"' for name in by ]) desc = \"true\" if desc else \"false\" self . 
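A sketch of `sort()`. As the notes above point out, results are already returned in ascending `_time` order, so this is only needed to order by other columns; the column name below is hypothetical, and omitting `by` sorts by `_value`.

```python
from omicron.dal.influx.flux import Flux

flux = Flux()
flux.sort(by=["close"], desc=True)

#  |> sort(columns: ["close"], desc: true)
print(flux.expressions["sort"])
```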
expressions [ \"sort\" ] = f \" |> sort(columns: [ { columns_ } ], desc: { desc } )\" return self tags ( self , tags ) \u00b6 \u7ed9\u67e5\u8be2\u6dfb\u52a0tags\u8fc7\u6ee4\u6761\u4ef6 \u6b64\u67e5\u8be2\u6761\u4ef6\u4e3a\u8fc7\u6ee4\u6761\u4ef6\uff0c\u5e76\u975e\u5fc5\u987b\u3002\u5982\u679c\u67e5\u8be2\u4e2d\u6ca1\u6709\u6307\u5b9atags\uff0c\u5219\u4f1a\u8fd4\u56de\u6240\u6709\u8bb0\u5f55\u3002 \u5728\u5b9e\u73b0\u4e0a\uff0c\u65e2\u53ef\u4ee5\u4f7f\u7528 contains \u8bed\u6cd5\uff0c\u4e5f\u53ef\u4ee5\u4f7f\u7528 or \u8bed\u6cd5(\u7531\u4e8e\u4e00\u6761\u8bb0\u5f55\u53ea\u80fd\u5c5e\u4e8e\u4e00\u4e2atag\uff0c\u6240\u4ee5\uff0c\u5f53\u6307\u5b9a\u591a\u4e2atag\u8fdb\u884c\u67e5\u8be2\u65f6\uff0c\u5b83\u4eec\u4e4b\u95f4\u7684\u5173\u7cfb\u5e94\u8be5\u4e3a or )\u3002\u7ecf\u9a8c\u8bc1\uff0ccontains\u8bed\u6cd5\u4f1a\u59cb\u7ec8\u5148\u5c06\u6240\u6709\u7b26\u5408\u6761\u4ef6\u7684\u8bb0\u5f55\u68c0\u7d22\u51fa\u6765\uff0c\u518d\u8fdb\u884c\u8fc7\u6ee4\u3002\u8fd9\u6837\u7684\u6548\u7387\u6bd4\u8f83\u4f4e\uff0c\u7279\u522b\u662f\u5f53tags\u7684\u6570\u91cf\u8f83\u5c11\u65f6\uff0c\u4f1a\u8fdc\u8fdc\u6bd4\u4f7f\u7528or\u8bed\u6cd5\u6162\u3002 Exceptions: Type Description DuplicateOperationError \u4e00\u4e2a\u67e5\u8be2\u4e2d\u53ea\u5141\u8bb8\u6267\u884c\u4e00\u6b21\uff0c\u5982\u679ctag filter\u8868\u8fbe\u5f0f\u5df2\u7ecf\u5b58\u5728\uff0c\u5219\u629b\u51fa\u5f02\u5e38 Parameters: Name Type Description Default tags tags\u662f\u4e00\u4e2a{tagname: Union[str,[tag_values]]}\u5bf9\u8c61\u3002 required Examples: >>> flux = Flux () >>> flux . tags ({ \"code\" : [ \"000001\" , \"000002\" ], \"name\" : [ \"\u6d66\u53d1\u94f6\u884c\" ]}) . expressions [ \"tags\" ] ' |> filter(fn: (r) => r[\"code\"] == \"000001\" or r[\"code\"] == \"000002\" or r[\"name\"] == \"\u6d66\u53d1\u94f6\u884c\")' Returns: Type Description Flux Flux\u5bf9\u8c61\uff0c\u4ee5\u4fbf\u8fdb\u884c\u7ba1\u9053\u64cd\u4f5c Source code in omicron/dal/influx/flux.py def tags ( self , tags : DefaultDict [ str , List [ str ]]) -> \"Flux\" : \"\"\"\u7ed9\u67e5\u8be2\u6dfb\u52a0tags\u8fc7\u6ee4\u6761\u4ef6 \u6b64\u67e5\u8be2\u6761\u4ef6\u4e3a\u8fc7\u6ee4\u6761\u4ef6\uff0c\u5e76\u975e\u5fc5\u987b\u3002\u5982\u679c\u67e5\u8be2\u4e2d\u6ca1\u6709\u6307\u5b9atags\uff0c\u5219\u4f1a\u8fd4\u56de\u6240\u6709\u8bb0\u5f55\u3002 \u5728\u5b9e\u73b0\u4e0a\uff0c\u65e2\u53ef\u4ee5\u4f7f\u7528`contains`\u8bed\u6cd5\uff0c\u4e5f\u53ef\u4ee5\u4f7f\u7528`or`\u8bed\u6cd5(\u7531\u4e8e\u4e00\u6761\u8bb0\u5f55\u53ea\u80fd\u5c5e\u4e8e\u4e00\u4e2atag\uff0c\u6240\u4ee5\uff0c\u5f53\u6307\u5b9a\u591a\u4e2atag\u8fdb\u884c\u67e5\u8be2\u65f6\uff0c\u5b83\u4eec\u4e4b\u95f4\u7684\u5173\u7cfb\u5e94\u8be5\u4e3a`or`)\u3002\u7ecf\u9a8c\u8bc1\uff0ccontains\u8bed\u6cd5\u4f1a\u59cb\u7ec8\u5148\u5c06\u6240\u6709\u7b26\u5408\u6761\u4ef6\u7684\u8bb0\u5f55\u68c0\u7d22\u51fa\u6765\uff0c\u518d\u8fdb\u884c\u8fc7\u6ee4\u3002\u8fd9\u6837\u7684\u6548\u7387\u6bd4\u8f83\u4f4e\uff0c\u7279\u522b\u662f\u5f53tags\u7684\u6570\u91cf\u8f83\u5c11\u65f6\uff0c\u4f1a\u8fdc\u8fdc\u6bd4\u4f7f\u7528or\u8bed\u6cd5\u6162\u3002 Raises: DuplicateOperationError: \u4e00\u4e2a\u67e5\u8be2\u4e2d\u53ea\u5141\u8bb8\u6267\u884c\u4e00\u6b21\uff0c\u5982\u679ctag filter\u8868\u8fbe\u5f0f\u5df2\u7ecf\u5b58\u5728\uff0c\u5219\u629b\u51fa\u5f02\u5e38 Args: tags : tags\u662f\u4e00\u4e2a{tagname: Union[str,[tag_values]]}\u5bf9\u8c61\u3002 Examples: >>> flux = Flux() >>> flux.tags({\"code\": [\"000001\", \"000002\"], \"name\": [\"\u6d66\u53d1\u94f6\u884c\"]}).expressions[\"tags\"] ' |> filter(fn: (r) => r[\"code\"] == \"000001\" or 
r[\"code\"] == \"000002\" or r[\"name\"] == \"\u6d66\u53d1\u94f6\u884c\")' Returns: Flux\u5bf9\u8c61\uff0c\u4ee5\u4fbf\u8fdb\u884c\u7ba1\u9053\u64cd\u4f5c \"\"\" if \"tags\" in self . expressions : raise DuplicateOperationError ( \"tags has been set\" ) filters = [] for tag , values in tags . items (): assert ( isinstance ( values , str ) or len ( values ) > 0 ), f \"tag { tag } should not be empty or None\" if isinstance ( values , str ): values = [ values ] for v in values : filters . append ( f 'r[\" { tag } \"] == \" { v } \"' ) op_expression = \" or \" . join ( filters ) self . expressions [ \"tags\" ] = f \" |> filter(fn: (r) => { op_expression } )\" return self to_timestamp ( tm , precision = 's' ) classmethod \u00b6 \u5c06\u65f6\u95f4\u6839\u636e\u7cbe\u5ea6\u8f6c\u6362\u4e3aunix\u65f6\u95f4\u6233 \u5728\u5f80influxdb\u5199\u5165\u6570\u636e\u65f6\uff0cline-protocol\u8981\u6c42\u7684\u65f6\u95f4\u6233\u4e3aunix timestamp\uff0c\u5e76\u4e14\u4e0e\u5176\u7cbe\u5ea6\u5bf9\u5e94\u3002 influxdb\u59cb\u7ec8\u4f7f\u7528UTC\u65f6\u95f4\uff0c\u56e0\u6b64\uff0c tm \u4e5f\u5fc5\u987b\u5df2\u7ecf\u8f6c\u6362\u6210UTC\u65f6\u95f4\u3002 Parameters: Name Type Description Default tm Union[datetime.date, datetime.datetime] \u65f6\u95f4 required precision str \u65f6\u95f4\u7cbe\u5ea6\uff0c\u9ed8\u8ba4\u4e3a\u79d2\u3002 's' Returns: Type Description int \u65f6\u95f4\u6233 Source code in omicron/dal/influx/flux.py @classmethod def to_timestamp ( cls , tm : Frame , precision : str = \"s\" ) -> int : \"\"\"\u5c06\u65f6\u95f4\u6839\u636e\u7cbe\u5ea6\u8f6c\u6362\u4e3aunix\u65f6\u95f4\u6233 \u5728\u5f80influxdb\u5199\u5165\u6570\u636e\u65f6\uff0cline-protocol\u8981\u6c42\u7684\u65f6\u95f4\u6233\u4e3aunix timestamp\uff0c\u5e76\u4e14\u4e0e\u5176\u7cbe\u5ea6\u5bf9\u5e94\u3002 influxdb\u59cb\u7ec8\u4f7f\u7528UTC\u65f6\u95f4\uff0c\u56e0\u6b64\uff0c`tm`\u4e5f\u5fc5\u987b\u5df2\u7ecf\u8f6c\u6362\u6210UTC\u65f6\u95f4\u3002 Args: tm: \u65f6\u95f4 precision: \u65f6\u95f4\u7cbe\u5ea6\uff0c\u9ed8\u8ba4\u4e3a\u79d2\u3002 Returns: \u65f6\u95f4\u6233 \"\"\" if precision not in [ \"s\" , \"ms\" , \"us\" ]: raise AssertionError ( \"precision must be 's', 'ms' or 'us'\" ) # get int repr of tm, in seconds unit if isinstance ( tm , np . datetime64 ): tm = tm . astype ( \"datetime64[s]\" ) . astype ( \"int\" ) elif isinstance ( tm , datetime . datetime ): tm = tm . timestamp () else : tm = arrow . get ( tm ) . timestamp () return int ( tm * 10 ** ({ \"s\" : 0 , \"ms\" : 3 , \"us\" : 6 }[ precision ]))","title":"Flux"},{"location":"api/dal/flux/#flux---the-query-language-builder-for-influxdb","text":"Helper functions for building flux query expression Source code in omicron/dal/influx/flux.py class Flux ( object ): \"\"\"Helper functions for building flux query expression\"\"\" EPOCH_START = datetime . datetime ( 1970 , 1 , 1 , 0 , 0 , 0 ) def __init__ ( self , auto_pivot = True , no_sys_cols = True ): \"\"\"\u521d\u59cb\u5316Flux\u5bf9\u8c61 Args: auto_pivot : \u662f\u5426\u81ea\u52a8\u5c06\u67e5\u8be2\u5217\u5b57\u6bb5\u7ec4\u88c5\u6210\u884c. Defaults to True. no_sys_cols: \u662f\u5426\u81ea\u52a8\u5c06\u7cfb\u7edf\u5b57\u6bb5\u5220\u9664. Defaults to True.\u8bf7\u53c2\u8003[drop_sys_cols][omicron.dal.influx.flux.Flux.drop_sys_cols] \"\"\" self . _cols = None self . expressions = defaultdict ( list ) self . _auto_pivot = auto_pivot self . _last_n = None self . no_sys_cols = no_sys_cols def __str__ ( self ): return self . _compose () def __repr__ ( self ) -> str : return f \"< { self . __class__ . __name__ } >: \\n { self . 
_compose () } \" def _compose ( self ): \"\"\"\u5c06\u6240\u6709\u8868\u8fbe\u5f0f\u5408\u5e76\u4e3a\u4e00\u4e2a\u8868\u8fbe\u5f0f\"\"\" if not all ( [ \"bucket\" in self . expressions , \"measurement\" in self . expressions , \"range\" in self . expressions , ] ): raise AssertionError ( \"bucket, measurement and range must be set\" ) expr = [ self . expressions [ k ] for k in ( \"bucket\" , \"range\" , \"measurement\" )] if self . expressions . get ( \"tags\" ): expr . append ( self . expressions [ \"tags\" ]) if self . expressions . get ( \"fields\" ): expr . append ( self . expressions [ \"fields\" ]) if \"drop\" not in self . expressions and self . no_sys_cols : self . drop_sys_cols () if self . expressions . get ( \"drop\" ): expr . append ( self . expressions [ \"drop\" ]) if self . _auto_pivot and \"pivot\" not in self . expressions : self . pivot () if self . expressions . get ( \"pivot\" ): expr . append ( self . expressions [ \"pivot\" ]) if self . expressions . get ( \"group\" ): expr . append ( self . expressions [ \"group\" ]) if self . expressions . get ( \"sort\" ): expr . append ( self . expressions [ \"sort\" ]) if self . expressions . get ( \"limit\" ): expr . append ( self . expressions [ \"limit\" ]) # influxdb\u9ed8\u8ba4\u6309\u5347\u5e8f\u6392\u5217\uff0c\u4f46last_n\u67e5\u8be2\u7684\u7ed3\u679c\u5219\u5fc5\u7136\u662f\u964d\u5e8f\u7684\uff0c\u6240\u4ee5\u8fd8\u9700\u8981\u518d\u6b21\u6392\u5e8f if self . _last_n : expr . append ( \" \\n \" . join ( [ f ' |> top(n: { self . _last_n } , columns: [\"_time\"])' , ' |> sort(columns: [\"_time\"], desc: false)' , ] ) ) return \" \\n \" . join ( expr ) def bucket ( self , bucket : str ) -> \"Flux\" : \"\"\"add bucket to query expression Raises: DuplicateOperationError: \u4e00\u4e2a\u67e5\u8be2\u4e2d\u53ea\u5141\u8bb8\u6307\u5b9a\u4e00\u4e2asource\uff0c\u5982\u679c\u8868\u8fbe\u5f0f\u4e2d\u5df2\u7ecf\u6307\u5b9a\u4e86bucket\uff0c\u5219\u629b\u51fa\u5f02\u5e38 Returns: Flux\u5bf9\u8c61 \"\"\" if \"bucket\" in self . expressions : raise DuplicateOperationError ( \"bucket has been set\" ) self . expressions [ \"bucket\" ] = f 'from(bucket: \" { bucket } \")' return self def measurement ( self , measurement : str ) -> \"Flux\" : \"\"\"add measurement filter to query Raises: DuplicateOperationError: \u4e00\u6b21\u67e5\u8be2\u4e2d\u53ea\u5141\u8bb8\u6307\u5b9a\u4e00\u4e2ameasurement, \u5982\u679c\u8868\u8fbe\u5f0f\u4e2d\u5df2\u7ecf\u5b58\u5728measurement, \u5219\u629b\u51fa\u5f02\u5e38 Returns: Flux\u5bf9\u8c61\u81ea\u8eab\uff0c\u4ee5\u4fbf\u8fdb\u884c\u7ba1\u9053\u64cd\u4f5c \"\"\" if \"measurement\" in self . expressions : raise DuplicateOperationError ( \"measurement has been set\" ) self . 
expressions [ \"measurement\" ] = f ' |> filter(fn: (r) => r[\"_measurement\"] == \" { measurement } \")' return self def range ( self , start : Frame , end : Frame , right_close = True , precision = \"s\" ) -> \"Flux\" : \"\"\"\u6dfb\u52a0\u65f6\u95f4\u8303\u56f4\u8fc7\u6ee4 \u5fc5\u987b\u6307\u5b9a\u7684\u67e5\u8be2\u6761\u4ef6\uff0c\u5426\u5219influxdb\u4f1a\u62a5unbound\u67e5\u8be2\u9519\uff0c\u56e0\u4e3a\u8fd9\u79cd\u60c5\u51b5\u4e0b\uff0c\u8fd4\u56de\u7684\u6570\u636e\u91cf\u5c06\u975e\u5e38\u5927\u3002 \u5728\u683c\u5f0f\u5316\u65f6\u95f4\u65f6\uff0c\u9700\u8981\u6839\u636e`precision`\u751f\u6210\u65f6\u95f4\u5b57\u7b26\u4e32\u3002\u5728\u5411Influxdb\u53d1\u9001\u8bf7\u6c42\u65f6\uff0c\u5e94\u8be5\u6ce8\u610f\u67e5\u8be2\u53c2\u6570\u4e2d\u6307\u5b9a\u7684\u65f6\u95f4\u7cbe\u5ea6\u4e0e\u8fd9\u91cc\u4f7f\u7528\u7684\u4fdd\u6301\u4e00\u81f4\u3002 Influxdb\u7684\u67e5\u8be2\u7ed3\u679c\u9ed8\u8ba4\u4e0d\u5305\u542b\u7ed3\u675f\u65f6\u95f4\uff0c\u5f53`right_close`\u6307\u5b9a\u4e3aTrue\u65f6\uff0c\u6211\u4eec\u5c06\u6839\u636e\u6307\u5b9a\u7684\u7cbe\u5ea6\u4fee\u6539`end`\u65f6\u95f4\uff0c\u4f7f\u4e4b\u4ec5\u6bd4`end`\u591a\u4e00\u4e2a\u65f6\u95f4\u5355\u4f4d\uff0c\u4ece\u800c\u4fdd\u8bc1\u67e5\u8be2\u7ed3\u679c\u4f1a\u5305\u542b`end`\u3002 Raises: DuplicateOperationError: \u4e00\u4e2a\u67e5\u8be2\u4e2d\u53ea\u5141\u8bb8\u6307\u5b9a\u4e00\u6b21\u65f6\u95f4\u8303\u56f4\uff0c\u5982\u679crange\u8868\u8fbe\u5f0f\u5df2\u7ecf\u5b58\u5728\uff0c\u5219\u629b\u51fa\u5f02\u5e38 Args: start: \u5f00\u59cb\u65f6\u95f4 end: \u7ed3\u675f\u65f6\u95f4 right_close: \u67e5\u8be2\u7ed3\u679c\u662f\u5426\u5305\u542b\u7ed3\u675f\u65f6\u95f4\u3002 precision: \u65f6\u95f4\u7cbe\u5ea6\uff0c\u9ed8\u8ba4\u4e3a\u79d2\u3002 Returns: Flux\u5bf9\u8c61\uff0c\u4ee5\u652f\u6301\u7ba1\u9053\u64cd\u4f5c \"\"\" if \"range\" in self . expressions : raise DuplicateOperationError ( \"range has been set\" ) if precision not in [ \"s\" , \"ms\" , \"us\" ]: raise AssertionError ( \"precision must be 's', 'ms' or 'us'\" ) end = self . format_time ( end , precision , right_close ) start = self . format_time ( start , precision ) self . expressions [ \"range\" ] = f \" |> range(start: { start } , stop: { end } )\" return self def limit ( self , limit : int ) -> \"Flux\" : \"\"\"\u6dfb\u52a0\u8fd4\u56de\u8bb0\u5f55\u6570\u9650\u5236 Raises: DuplicateOperationError: \u4e00\u4e2a\u67e5\u8be2\u4e2d\u53ea\u5141\u8bb8\u6307\u5b9a\u4e00\u6b21limit\uff0c\u5982\u679climit\u8868\u8fbe\u5f0f\u5df2\u7ecf\u5b58\u5728\uff0c\u5219\u629b\u51fa\u5f02\u5e38 Args: limit: \u8fd4\u56de\u8bb0\u5f55\u6570\u9650\u5236 Returns: Flux\u5bf9\u8c61\uff0c\u4ee5\u4fbf\u8fdb\u884c\u7ba1\u9053\u64cd\u4f5c \"\"\" if \"limit\" in self . expressions : raise DuplicateOperationError ( \"limit has been set\" ) self . 
expressions [ \"limit\" ] = \" |> limit(n: %d )\" % limit return self @classmethod def to_timestamp ( cls , tm : Frame , precision : str = \"s\" ) -> int : \"\"\"\u5c06\u65f6\u95f4\u6839\u636e\u7cbe\u5ea6\u8f6c\u6362\u4e3aunix\u65f6\u95f4\u6233 \u5728\u5f80influxdb\u5199\u5165\u6570\u636e\u65f6\uff0cline-protocol\u8981\u6c42\u7684\u65f6\u95f4\u6233\u4e3aunix timestamp\uff0c\u5e76\u4e14\u4e0e\u5176\u7cbe\u5ea6\u5bf9\u5e94\u3002 influxdb\u59cb\u7ec8\u4f7f\u7528UTC\u65f6\u95f4\uff0c\u56e0\u6b64\uff0c`tm`\u4e5f\u5fc5\u987b\u5df2\u7ecf\u8f6c\u6362\u6210UTC\u65f6\u95f4\u3002 Args: tm: \u65f6\u95f4 precision: \u65f6\u95f4\u7cbe\u5ea6\uff0c\u9ed8\u8ba4\u4e3a\u79d2\u3002 Returns: \u65f6\u95f4\u6233 \"\"\" if precision not in [ \"s\" , \"ms\" , \"us\" ]: raise AssertionError ( \"precision must be 's', 'ms' or 'us'\" ) # get int repr of tm, in seconds unit if isinstance ( tm , np . datetime64 ): tm = tm . astype ( \"datetime64[s]\" ) . astype ( \"int\" ) elif isinstance ( tm , datetime . datetime ): tm = tm . timestamp () else : tm = arrow . get ( tm ) . timestamp () return int ( tm * 10 ** ({ \"s\" : 0 , \"ms\" : 3 , \"us\" : 6 }[ precision ])) @classmethod def format_time ( cls , tm : Frame , precision : str = \"s\" , shift_forward = False ) -> str : \"\"\"\u5c06\u65f6\u95f4\u8f6c\u6362\u6210\u5ba2\u6237\u7aef\u5bf9\u5e94\u7684\u7cbe\u5ea6\uff0c\u5e76\u4ee5 RFC3339 timestamps\u683c\u5f0f\u4e32\uff08\u5373influxdb\u8981\u6c42\u7684\u683c\u5f0f\uff09\u8fd4\u56de\u3002 \u5982\u679c\u8fd9\u4e2a\u65f6\u95f4\u662f\u4f5c\u4e3a\u67e5\u8be2\u7684range\u4e2d\u7684\u7ed3\u675f\u65f6\u95f4\u4f7f\u7528\u65f6\uff0c\u7531\u4e8einflux\u67e5\u8be2\u7684\u65f6\u95f4\u8303\u56f4\u662f\u5de6\u95ed\u53f3\u5f00\u7684\uff0c\u56e0\u6b64\u5982\u679c\u4f60\u9700\u8981\u67e5\u8be2\u7684\u662f\u4e00\u4e2a\u95ed\u533a\u95f4\uff0c\u5219\u9700\u8981\u5c06`end`\u7684\u65f6\u95f4\u5411\u524d\u504f\u79fb\u4e00\u4e2a\u7cbe\u5ea6\u3002\u901a\u8fc7\u4f20\u5165`shift_forward = True`\u53ef\u4ee5\u5b8c\u6210\u8fd9\u79cd\u8f6c\u6362\u3002 Examples: >>> # by default, the precision is seconds, and convert a date >>> Flux.format_time(datetime.date(2019, 1, 1)) '2019-01-01T00:00:00Z' >>> # set precision to ms, convert a time >>> Flux.format_time(datetime.datetime(1978, 7, 8, 12, 34, 56, 123456), precision=\"ms\") '1978-07-08T12:34:56.123Z' >>> # convert and forward shift >>> Flux.format_time(datetime.date(1978, 7, 8), shift_forward = True) '1978-07-08T00:00:01Z' Args: tm : \u5f85\u683c\u5f0f\u5316\u7684\u65f6\u95f4 precision: \u65f6\u95f4\u7cbe\u5ea6\uff0c\u53ef\u9009\u503c\u4e3a\uff1a's', 'ms', 'us' shift_forward: \u5982\u679c\u4e3aTrue\uff0c\u5219\u5c06end\u5411\u524d\u504f\u79fb\u4e00\u4e2a\u7cbe\u5ea6 Returns: \u8c03\u6574\u540e\u7b26\u5408influx\u65f6\u95f4\u89c4\u8303\u7684\u65f6\u95f4\uff08\u5b57\u7b26\u4e32\u8868\u793a\uff09 \"\"\" timespec = { \"s\" : \"seconds\" , \"ms\" : \"milliseconds\" , \"us\" : \"microseconds\" } . get ( precision ) if timespec is None : raise ValueError ( f \"precision must be one of 's', 'ms', 'us', but got { precision } \" ) tm = arrow . get ( tm ) . naive if shift_forward : tm = tm + datetime . timedelta ( ** { timespec : 1 }) return tm . 
isoformat ( sep = \"T\" , timespec = timespec ) + \"Z\" def tags ( self , tags : DefaultDict [ str , List [ str ]]) -> \"Flux\" : \"\"\"\u7ed9\u67e5\u8be2\u6dfb\u52a0tags\u8fc7\u6ee4\u6761\u4ef6 \u6b64\u67e5\u8be2\u6761\u4ef6\u4e3a\u8fc7\u6ee4\u6761\u4ef6\uff0c\u5e76\u975e\u5fc5\u987b\u3002\u5982\u679c\u67e5\u8be2\u4e2d\u6ca1\u6709\u6307\u5b9atags\uff0c\u5219\u4f1a\u8fd4\u56de\u6240\u6709\u8bb0\u5f55\u3002 \u5728\u5b9e\u73b0\u4e0a\uff0c\u65e2\u53ef\u4ee5\u4f7f\u7528`contains`\u8bed\u6cd5\uff0c\u4e5f\u53ef\u4ee5\u4f7f\u7528`or`\u8bed\u6cd5(\u7531\u4e8e\u4e00\u6761\u8bb0\u5f55\u53ea\u80fd\u5c5e\u4e8e\u4e00\u4e2atag\uff0c\u6240\u4ee5\uff0c\u5f53\u6307\u5b9a\u591a\u4e2atag\u8fdb\u884c\u67e5\u8be2\u65f6\uff0c\u5b83\u4eec\u4e4b\u95f4\u7684\u5173\u7cfb\u5e94\u8be5\u4e3a`or`)\u3002\u7ecf\u9a8c\u8bc1\uff0ccontains\u8bed\u6cd5\u4f1a\u59cb\u7ec8\u5148\u5c06\u6240\u6709\u7b26\u5408\u6761\u4ef6\u7684\u8bb0\u5f55\u68c0\u7d22\u51fa\u6765\uff0c\u518d\u8fdb\u884c\u8fc7\u6ee4\u3002\u8fd9\u6837\u7684\u6548\u7387\u6bd4\u8f83\u4f4e\uff0c\u7279\u522b\u662f\u5f53tags\u7684\u6570\u91cf\u8f83\u5c11\u65f6\uff0c\u4f1a\u8fdc\u8fdc\u6bd4\u4f7f\u7528or\u8bed\u6cd5\u6162\u3002 Raises: DuplicateOperationError: \u4e00\u4e2a\u67e5\u8be2\u4e2d\u53ea\u5141\u8bb8\u6267\u884c\u4e00\u6b21\uff0c\u5982\u679ctag filter\u8868\u8fbe\u5f0f\u5df2\u7ecf\u5b58\u5728\uff0c\u5219\u629b\u51fa\u5f02\u5e38 Args: tags : tags\u662f\u4e00\u4e2a{tagname: Union[str,[tag_values]]}\u5bf9\u8c61\u3002 Examples: >>> flux = Flux() >>> flux.tags({\"code\": [\"000001\", \"000002\"], \"name\": [\"\u6d66\u53d1\u94f6\u884c\"]}).expressions[\"tags\"] ' |> filter(fn: (r) => r[\"code\"] == \"000001\" or r[\"code\"] == \"000002\" or r[\"name\"] == \"\u6d66\u53d1\u94f6\u884c\")' Returns: Flux\u5bf9\u8c61\uff0c\u4ee5\u4fbf\u8fdb\u884c\u7ba1\u9053\u64cd\u4f5c \"\"\" if \"tags\" in self . expressions : raise DuplicateOperationError ( \"tags has been set\" ) filters = [] for tag , values in tags . items (): assert ( isinstance ( values , str ) or len ( values ) > 0 ), f \"tag { tag } should not be empty or None\" if isinstance ( values , str ): values = [ values ] for v in values : filters . append ( f 'r[\" { tag } \"] == \" { v } \"' ) op_expression = \" or \" . join ( filters ) self . expressions [ \"tags\" ] = f \" |> filter(fn: (r) => { op_expression } )\" return self def fields ( self , fields : List , reserve_time_stamp : bool = True ) -> \"Flux\" : \"\"\"\u7ed9\u67e5\u8be2\u6dfb\u52a0field\u8fc7\u6ee4\u6761\u4ef6 \u6b64\u67e5\u8be2\u6761\u4ef6\u4e3a\u8fc7\u6ee4\u6761\u4ef6\uff0c\u7528\u4ee5\u6307\u5b9a\u54ea\u4e9bfield\u4f1a\u51fa\u73b0\u5728\u67e5\u8be2\u7ed3\u679c\u4e2d\uff0c\u5e76\u975e\u5fc5\u987b\u3002\u5982\u679c\u67e5\u8be2\u4e2d\u6ca1\u6709\u6307\u5b9atags\uff0c\u5219\u4f1a\u8fd4\u56de\u6240\u6709\u8bb0\u5f55\u3002 \u7531\u4e8e\u4e00\u6761\u8bb0\u5f55\u53ea\u80fd\u5c5e\u4e8e\u4e00\u4e2a_field\uff0c\u6240\u4ee5\uff0c\u5f53\u6307\u5b9a\u591a\u4e2a_field\u8fdb\u884c\u67e5\u8be2\u65f6\uff0c\u5b83\u4eec\u4e4b\u95f4\u7684\u5173\u7cfb\u5e94\u8be5\u4e3a`or`\u3002 Raises: DuplicateOperationError: \u4e00\u4e2a\u67e5\u8be2\u4e2d\u53ea\u5141\u8bb8\u6267\u884c\u4e00\u6b21\uff0c\u5982\u679cfiled filter\u8868\u8fbe\u5f0f\u5df2\u7ecf\u5b58\u5728\uff0c\u5219\u629b\u51fa\u5f02\u5e38 Args: fields: \u5f85\u67e5\u8be2\u7684field\u5217\u8868 reserve_time_stamp: \u662f\u5426\u4fdd\u7559\u65f6\u95f4\u6233`_time`\uff0c\u9ed8\u8ba4\u4e3aTrue Returns: Flux\u5bf9\u8c61\uff0c\u4ee5\u4fbf\u8fdb\u884c\u7ba1\u9053\u64cd\u4f5c \"\"\" if \"fields\" in self . 
expressions : raise DuplicateOperationError ( \"fields has been set\" ) self . _cols = fields . copy () if reserve_time_stamp and \"_time\" not in self . _cols : self . _cols . append ( \"_time\" ) self . _cols = sorted ( self . _cols ) filters = [ f 'r[\"_field\"] == \" { name } \"' for name in self . _cols ] self . expressions [ \"fields\" ] = f \" |> filter(fn: (r) => { ' or ' . join ( filters ) } )\" return self def pivot ( self , row_keys : List [ str ] = [ \"_time\" ], column_keys = [ \"_field\" ], value_column : str = \"_value\" , ) -> \"Flux\" : \"\"\"pivot\u7528\u6765\u5c06\u4ee5\u5217\u4e3a\u5355\u4f4d\u7684\u6570\u636e\u8f6c\u6362\u4e3a\u4ee5\u884c\u4e3a\u5355\u4f4d\u7684\u6570\u636e Flux\u67e5\u8be2\u8fd4\u56de\u7684\u7ed3\u679c\u901a\u5e38\u90fd\u662f\u4ee5\u5217\u4e3a\u5355\u4f4d\u7684\u6570\u636e\uff0c\u589e\u52a0\u672cpivot\u6761\u4ef6\u540e\uff0c\u7ed3\u679c\u5c06\u88ab\u8f6c\u6362\u6210\u4e3a\u4ee5\u884c\u4e3a\u5355\u4f4d\u7684\u6570\u636e\u518d\u8fd4\u56de\u3002 \u8fd9\u91cc\u5b9e\u73b0\u7684\u662fmeasurement\u5185\u7684\u8f6c\u6362\uff0c\u8bf7\u53c2\u8003 [pivot](https://docs.influxdata.com/flux/v0.x/stdlib/universe/pivot/#align-fields-within-each-measurement-that-have-the-same-timestamp) Args: row_keys: \u60df\u4e00\u786e\u5b9a\u8f93\u51fa\u4e2d\u4e00\u884c\u6570\u636e\u7684\u5217\u540d\u5b57, \u9ed8\u8ba4\u4e3a[\"_time\"] column_keys: \u5217\u540d\u79f0\u5217\u8868\uff0c\u9ed8\u8ba4\u4e3a[\"_field\"] value_column: \u503c\u5217\u540d\uff0c\u9ed8\u8ba4\u4e3a\"_value\" Returns: Flux\u5bf9\u8c61\uff0c\u4ee5\u4fbf\u8fdb\u884c\u7ba1\u9053\u64cd\u4f5c \"\"\" if \"pivot\" in self . expressions : raise DuplicateOperationError ( \"pivot has been set\" ) columns = \",\" . join ([ f '\" { name } \"' for name in column_keys ]) rowkeys = \",\" . join ([ f '\" { name } \"' for name in row_keys ]) self . expressions [ \"pivot\" ] = f ' |> pivot(columnKey: [ { columns } ], rowKey: [ { rowkeys } ], valueColumn: \" { value_column } \")' return self def sort ( self , by : List [ str ] = None , desc : bool = False ) -> \"Flux\" : \"\"\"\u6309\u7167\u6307\u5b9a\u7684\u5217\u8fdb\u884c\u6392\u5e8f \u6839\u636e[influxdb doc](https://docs.influxdata.com/influxdb/v2.0/query-data/flux/first-last/), \u67e5\u8be2\u8fd4\u56de\u503c\u9ed8\u8ba4\u5730\u6309\u65f6\u95f4\u6392\u5e8f\u3002\u56e0\u6b64\uff0c\u5982\u679c\u4ec5\u4ec5\u662f\u8981\u6c42\u67e5\u8be2\u7ed3\u679c\u6309\u65f6\u95f4\u6392\u5e8f\uff0c\u65e0\u987b\u8c03\u7528\u6b64API\uff0c\u4f46\u662f\uff0c\u6b64API\u63d0\u4f9b\u4e86\u6309\u5176\u5b83\u5b57\u6bb5\u6392\u5e8f\u7684\u80fd\u529b\u3002 \u53e6\u5916\uff0c\u5728\u4e00\u4e2a\u67095000\u591a\u4e2atag\uff0c\u5171\u8fd4\u56de1M\u6761\u8bb0\u5f55\u7684\u6d4b\u8bd5\u4e2d\uff0c\u6d4b\u8bd5\u9a8c\u8bc1\u8fd4\u56de\u8bb0\u5f55\u786e\u5b9e\u6309_time\u5347\u5e8f\u6392\u5217\u3002 Args: by: \u6307\u5b9a\u6392\u5e8f\u7684\u5217\u540d\u79f0\u5217\u8868 Returns: Flux\u5bf9\u8c61\uff0c\u4ee5\u4fbf\u8fdb\u884c\u7ba1\u9053\u64cd\u4f5c \"\"\" if \"sort\" in self . expressions : raise DuplicateOperationError ( \"sort has been set\" ) if by is None : by = [ \"_value\" ] if isinstance ( by , str ): by = [ by ] columns_ = \",\" . join ([ f '\" { name } \"' for name in by ]) desc = \"true\" if desc else \"false\" self . expressions [ \"sort\" ] = f \" |> sort(columns: [ { columns_ } ], desc: { desc } )\" return self def group ( self , by : Tuple [ str ]) -> \"Flux\" : \"\"\"[summary] Returns: [description] \"\"\" if \"group\" in self . 
expressions : raise DuplicateOperationError ( \"group has been set\" ) if isinstance ( by , str ): by = [ by ] cols = \",\" . join ([ f '\" { col } \"' for col in by ]) self . expressions [ \"group\" ] = f \" |> group(columns: [ { cols } ])\" return self def latest ( self , n : int ) -> \"Flux\" : \"\"\"\u83b7\u53d6\u6700\u540en\u6761\u6570\u636e\uff0c\u6309\u65f6\u95f4\u589e\u5e8f\u8fd4\u56de Flux\u67e5\u8be2\u7684\u589e\u5f3a\u529f\u80fd\uff0c\u76f8\u5f53\u4e8etop + sort + limit Args: n: \u6700\u540en\u6761\u6570\u636e Returns: Flux\u5bf9\u8c61\uff0c\u4ee5\u4fbf\u8fdb\u884c\u7ba1\u9053\u64cd\u4f5c \"\"\" assert \"top\" not in self . expressions , \"top and last_n can not be used together\" assert ( \"sort\" not in self . expressions ), \"sort and last_n can not be used together\" assert ( \"limit\" not in self . expressions ), \"limit and last_n can not be used together\" self . _last_n = n return self @property def cols ( self ) -> List [ str ]: \"\"\"the columns or the return records the implementation is buggy. Influx doesn't tell us in which order these columns are. Returns: the columns name of the return records \"\"\" # fixme: if keep in expression, then return group key + tag key + value key # if keep not in expression, then stream, table, _time, ... return sorted ( self . _cols ) def delete ( self , measurement : str , stop : datetime . datetime , tags : dict = {}, start : datetime . datetime = None , precision : str = \"s\" , ) -> dict : \"\"\"\u6784\u5efa\u5220\u9664\u8bed\u53e5\u3002 according to [delete-predicate](https://docs.influxdata.com/influxdb/v2.1/reference/syntax/delete-predicate/), delete\u53ea\u652f\u6301AND\u903b\u8f91\u64cd\u4f5c\uff0c\u53ea\u652f\u6301\u201c=\u201d\u64cd\u4f5c\uff0c\u4e0d\u652f\u6301\u201c\uff01=\u201d\u64cd\u4f5c\uff0c\u53ef\u4ee5\u4f7f\u7528\u4efb\u4f55\u5b57\u6bb5\u6216\u8005tag\uff0c\u4f46\u4e0d\u5305\u62ec_time\u548c_value\u5b57\u6bb5\u3002 \u7531\u4e8einfluxdb\u8fd9\u4e00\u6bb5\u6587\u6863\u4e0d\u662f\u5f88\u6e05\u695a\uff0c\u6839\u636e\u8bd5\u9a8c\u7ed3\u679c\uff0c\u76ee\u524d\u4ec5\u652f\u6301\u6309\u65f6\u95f4\u8303\u56f4\u548ctags\u8fdb\u884c\u5220\u9664\u8f83\u597d\u3002\u5982\u679c\u67d0\u4e2acolumn\u7684\u503c\u7c7b\u578b\u662f\u5b57\u7b26\u4e32\uff0c\u5219\u4e5f\u53ef\u4ee5\u901a\u8fc7`tags`\u53c2\u6570\u4f20\u5165\uff0c\u5339\u914d\u540e\u5220\u9664\u3002\u4f46\u5982\u679c\u4f20\u5165\u4e86\u975e\u5b57\u7b26\u4e32\u7c7b\u578b\u7684column\uff0c\u5219\u5c06\u5f97\u5230\u65e0\u6cd5\u9884\u6599\u7684\u7ed3\u679c\u3002 Args: measurement : [description] stop : [description] tags : \u6309tags\u548c\u5339\u914d\u7684\u503c\u8fdb\u884c\u5220\u9664\u3002\u4f20\u5165\u7684tags\u4e2d\uff0ckey\u4e3atag\u540d\u79f0\uff0cvalue\u4e3atag\u8981\u5339\u914d\u7684\u53d6\u503c\uff0c\u53ef\u4ee5\u4e3astr\u6216\u8005List[str]\u3002 start : \u8d77\u59cb\u65f6\u95f4\u3002\u5982\u679c\u7701\u7565\uff0c\u5219\u4f7f\u7528EPOCH_START. precision : \u65f6\u95f4\u7cbe\u5ea6\u3002\u53ef\u4ee5\u4e3a\u201cs\u201d\uff0c\u201cms\u201d\uff0c\u201cus\u201d Returns: \u5220\u9664\u8bed\u53e5 \"\"\" timespec = { \"s\" : \"seconds\" , \"ms\" : \"milliseconds\" , \"us\" : \"microseconds\" } . get ( precision ) if start is None : start = self . EPOCH_START . isoformat ( timespec = timespec ) + \"Z\" predicate = [ f '_measurement=\" { measurement } \"' ] for key , value in tags . items (): if isinstance ( value , list ): predicate . extend ([ f ' { key } = \" { v } \"' for v in value ]) else : predicate . 
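A minimal sketch of `group()`; per the source above a plain string is accepted and wrapped into a list, and the tag name used here is hypothetical.

```python
from omicron.dal.influx.flux import Flux

flux = Flux()
# Group the result tables by the "code" tag.
flux.group("code")

#  |> group(columns: ["code"])
print(flux.expressions["group"])
```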
append ( f ' { key } = \" { value } \"' ) command = { \"start\" : start , \"stop\" : f \" { stop . isoformat ( timespec = timespec ) } Z\" , \"predicate\" : \" AND \" . join ( predicate ), } return command def drop ( self , cols : List [ str ]) -> \"Flux\" : \"\"\"use this to drop columns before return result Args: cols : the name of columns to be dropped Returns: Flux object, to support pipe operation \"\"\" if \"drop\" in self . expressions : raise DuplicateOperationError ( \"drop operation has been set already\" ) # add surrounding quotes _cols = [ f '\" { c } \"' for c in cols ] self . expressions [ \"drop\" ] = f \" |> drop(columns: [ { ',' . join ( _cols ) } ])\" return self def drop_sys_cols ( self , cols : List [ str ] = None ) -> \"Flux\" : \"\"\"use this to drop [\"_start\", \"_stop\", \"_measurement\"], plus columns specified in `cols`, before return query result please be noticed, after drop sys columns, there's still two sys columns left, which is \"_time\" and \"table\", and \"_time\" should usually be kept, \"table\" is one we're not able to removed. If you don't like _time in return result, you can specify it in `cols` parameter. Args: cols : the extra columns to be dropped Returns: Flux query object \"\"\" _cols = [ \"_start\" , \"_stop\" , \"_measurement\" ] if cols is not None : _cols . extend ( cols ) return self . drop ( _cols )","title":"Flux - the query language builder for influxdb"},{"location":"api/dal/flux/#omicron.dal.influx.flux.Flux.cols","text":"the columns or the return records the implementation is buggy. Influx doesn't tell us in which order these columns are. Returns: Type Description List[str] the columns name of the return records","title":"cols"},{"location":"api/dal/flux/#omicron.dal.influx.flux.Flux.__init__","text":"\u521d\u59cb\u5316Flux\u5bf9\u8c61 Parameters: Name Type Description Default auto_pivot \u662f\u5426\u81ea\u52a8\u5c06\u67e5\u8be2\u5217\u5b57\u6bb5\u7ec4\u88c5\u6210\u884c. Defaults to True. True no_sys_cols \u662f\u5426\u81ea\u52a8\u5c06\u7cfb\u7edf\u5b57\u6bb5\u5220\u9664. Defaults to True.\u8bf7\u53c2\u8003 drop_sys_cols True Source code in omicron/dal/influx/flux.py def __init__ ( self , auto_pivot = True , no_sys_cols = True ): \"\"\"\u521d\u59cb\u5316Flux\u5bf9\u8c61 Args: auto_pivot : \u662f\u5426\u81ea\u52a8\u5c06\u67e5\u8be2\u5217\u5b57\u6bb5\u7ec4\u88c5\u6210\u884c. Defaults to True. no_sys_cols: \u662f\u5426\u81ea\u52a8\u5c06\u7cfb\u7edf\u5b57\u6bb5\u5220\u9664. Defaults to True.\u8bf7\u53c2\u8003[drop_sys_cols][omicron.dal.influx.flux.Flux.drop_sys_cols] \"\"\" self . _cols = None self . expressions = defaultdict ( list ) self . _auto_pivot = auto_pivot self . _last_n = None self . no_sys_cols = no_sys_cols","title":"__init__()"},{"location":"api/dal/flux/#omicron.dal.influx.flux.Flux.bucket","text":"add bucket to query expression Exceptions: Type Description DuplicateOperationError \u4e00\u4e2a\u67e5\u8be2\u4e2d\u53ea\u5141\u8bb8\u6307\u5b9a\u4e00\u4e2asource\uff0c\u5982\u679c\u8868\u8fbe\u5f0f\u4e2d\u5df2\u7ecf\u6307\u5b9a\u4e86bucket\uff0c\u5219\u629b\u51fa\u5f02\u5e38 Returns: Type Description Flux Flux\u5bf9\u8c61 Source code in omicron/dal/influx/flux.py def bucket ( self , bucket : str ) -> \"Flux\" : \"\"\"add bucket to query expression Raises: DuplicateOperationError: \u4e00\u4e2a\u67e5\u8be2\u4e2d\u53ea\u5141\u8bb8\u6307\u5b9a\u4e00\u4e2asource\uff0c\u5982\u679c\u8868\u8fbe\u5f0f\u4e2d\u5df2\u7ecf\u6307\u5b9a\u4e86bucket\uff0c\u5219\u629b\u51fa\u5f02\u5e38 Returns: Flux\u5bf9\u8c61 \"\"\" if \"bucket\" in self . 
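A sketch of the constructor options documented above. With the defaults (`auto_pivot=True`, `no_sys_cols=True`), `_compose()` silently appends `drop_sys_cols()` and `pivot()`; turning both off keeps the raw, column-oriented output including `_start`, `_stop` and `_measurement`. The bucket and measurement names are hypothetical.

```python
import datetime
from omicron.dal.influx.flux import Flux

flux = (
    Flux(auto_pivot=False, no_sys_cols=False)
    .bucket("zillionare")
    .measurement("stock_bars_1d")
    .range(datetime.date(2022, 1, 1), datetime.date(2022, 1, 2))
)

# No drop(...) and no pivot(...) clause appears in the composed query.
print(str(flux))
```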
expressions : raise DuplicateOperationError ( \"bucket has been set\" ) self . expressions [ \"bucket\" ] = f 'from(bucket: \" { bucket } \")' return self","title":"bucket()"},{"location":"api/dal/flux/#omicron.dal.influx.flux.Flux.delete","text":"\u6784\u5efa\u5220\u9664\u8bed\u53e5\u3002 according to delete-predicate , delete\u53ea\u652f\u6301AND\u903b\u8f91\u64cd\u4f5c\uff0c\u53ea\u652f\u6301\u201c=\u201d\u64cd\u4f5c\uff0c\u4e0d\u652f\u6301\u201c\uff01=\u201d\u64cd\u4f5c\uff0c\u53ef\u4ee5\u4f7f\u7528\u4efb\u4f55\u5b57\u6bb5\u6216\u8005tag\uff0c\u4f46\u4e0d\u5305\u62ec_time\u548c_value\u5b57\u6bb5\u3002 \u7531\u4e8einfluxdb\u8fd9\u4e00\u6bb5\u6587\u6863\u4e0d\u662f\u5f88\u6e05\u695a\uff0c\u6839\u636e\u8bd5\u9a8c\u7ed3\u679c\uff0c\u76ee\u524d\u4ec5\u652f\u6301\u6309\u65f6\u95f4\u8303\u56f4\u548ctags\u8fdb\u884c\u5220\u9664\u8f83\u597d\u3002\u5982\u679c\u67d0\u4e2acolumn\u7684\u503c\u7c7b\u578b\u662f\u5b57\u7b26\u4e32\uff0c\u5219\u4e5f\u53ef\u4ee5\u901a\u8fc7 tags \u53c2\u6570\u4f20\u5165\uff0c\u5339\u914d\u540e\u5220\u9664\u3002\u4f46\u5982\u679c\u4f20\u5165\u4e86\u975e\u5b57\u7b26\u4e32\u7c7b\u578b\u7684column\uff0c\u5219\u5c06\u5f97\u5230\u65e0\u6cd5\u9884\u6599\u7684\u7ed3\u679c\u3002 Parameters: Name Type Description Default measurement [description] required stop [description] required tags \u6309tags\u548c\u5339\u914d\u7684\u503c\u8fdb\u884c\u5220\u9664\u3002\u4f20\u5165\u7684tags\u4e2d\uff0ckey\u4e3atag\u540d\u79f0\uff0cvalue\u4e3atag\u8981\u5339\u914d\u7684\u53d6\u503c\uff0c\u53ef\u4ee5\u4e3astr\u6216\u8005List[str]\u3002 {} start \u8d77\u59cb\u65f6\u95f4\u3002\u5982\u679c\u7701\u7565\uff0c\u5219\u4f7f\u7528EPOCH_START. None precision \u65f6\u95f4\u7cbe\u5ea6\u3002\u53ef\u4ee5\u4e3a\u201cs\u201d\uff0c\u201cms\u201d\uff0c\u201cus\u201d 's' Returns: Type Description dict \u5220\u9664\u8bed\u53e5 Source code in omicron/dal/influx/flux.py def delete ( self , measurement : str , stop : datetime . datetime , tags : dict = {}, start : datetime . datetime = None , precision : str = \"s\" , ) -> dict : \"\"\"\u6784\u5efa\u5220\u9664\u8bed\u53e5\u3002 according to [delete-predicate](https://docs.influxdata.com/influxdb/v2.1/reference/syntax/delete-predicate/), delete\u53ea\u652f\u6301AND\u903b\u8f91\u64cd\u4f5c\uff0c\u53ea\u652f\u6301\u201c=\u201d\u64cd\u4f5c\uff0c\u4e0d\u652f\u6301\u201c\uff01=\u201d\u64cd\u4f5c\uff0c\u53ef\u4ee5\u4f7f\u7528\u4efb\u4f55\u5b57\u6bb5\u6216\u8005tag\uff0c\u4f46\u4e0d\u5305\u62ec_time\u548c_value\u5b57\u6bb5\u3002 \u7531\u4e8einfluxdb\u8fd9\u4e00\u6bb5\u6587\u6863\u4e0d\u662f\u5f88\u6e05\u695a\uff0c\u6839\u636e\u8bd5\u9a8c\u7ed3\u679c\uff0c\u76ee\u524d\u4ec5\u652f\u6301\u6309\u65f6\u95f4\u8303\u56f4\u548ctags\u8fdb\u884c\u5220\u9664\u8f83\u597d\u3002\u5982\u679c\u67d0\u4e2acolumn\u7684\u503c\u7c7b\u578b\u662f\u5b57\u7b26\u4e32\uff0c\u5219\u4e5f\u53ef\u4ee5\u901a\u8fc7`tags`\u53c2\u6570\u4f20\u5165\uff0c\u5339\u914d\u540e\u5220\u9664\u3002\u4f46\u5982\u679c\u4f20\u5165\u4e86\u975e\u5b57\u7b26\u4e32\u7c7b\u578b\u7684column\uff0c\u5219\u5c06\u5f97\u5230\u65e0\u6cd5\u9884\u6599\u7684\u7ed3\u679c\u3002 Args: measurement : [description] stop : [description] tags : \u6309tags\u548c\u5339\u914d\u7684\u503c\u8fdb\u884c\u5220\u9664\u3002\u4f20\u5165\u7684tags\u4e2d\uff0ckey\u4e3atag\u540d\u79f0\uff0cvalue\u4e3atag\u8981\u5339\u914d\u7684\u53d6\u503c\uff0c\u53ef\u4ee5\u4e3astr\u6216\u8005List[str]\u3002 start : \u8d77\u59cb\u65f6\u95f4\u3002\u5982\u679c\u7701\u7565\uff0c\u5219\u4f7f\u7528EPOCH_START. 
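A sketch of the duplicate-operation check shared by `bucket()` and the other builder methods: setting the same clause twice raises `DuplicateOperationError`. The exact import path of that exception class is not shown on this page, so the example catches it generically.

```python
from omicron.dal.influx.flux import Flux

flux = Flux().bucket("zillionare")      # hypothetical bucket name
try:
    flux.bucket("another-bucket")       # second bucket on the same query
except Exception as e:                  # DuplicateOperationError; import path not shown here
    print(type(e).__name__, e)          # -> DuplicateOperationError bucket has been set
```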
precision : \u65f6\u95f4\u7cbe\u5ea6\u3002\u53ef\u4ee5\u4e3a\u201cs\u201d\uff0c\u201cms\u201d\uff0c\u201cus\u201d Returns: \u5220\u9664\u8bed\u53e5 \"\"\" timespec = { \"s\" : \"seconds\" , \"ms\" : \"milliseconds\" , \"us\" : \"microseconds\" } . get ( precision ) if start is None : start = self . EPOCH_START . isoformat ( timespec = timespec ) + \"Z\" predicate = [ f '_measurement=\" { measurement } \"' ] for key , value in tags . items (): if isinstance ( value , list ): predicate . extend ([ f ' { key } = \" { v } \"' for v in value ]) else : predicate . append ( f ' { key } = \" { value } \"' ) command = { \"start\" : start , \"stop\" : f \" { stop . isoformat ( timespec = timespec ) } Z\" , \"predicate\" : \" AND \" . join ( predicate ), } return command","title":"delete()"},{"location":"api/dal/flux/#omicron.dal.influx.flux.Flux.drop","text":"use this to drop columns before return result Parameters: Name Type Description Default cols the name of columns to be dropped required Returns: Type Description Flux Flux object, to support pipe operation Source code in omicron/dal/influx/flux.py def drop ( self , cols : List [ str ]) -> \"Flux\" : \"\"\"use this to drop columns before return result Args: cols : the name of columns to be dropped Returns: Flux object, to support pipe operation \"\"\" if \"drop\" in self . expressions : raise DuplicateOperationError ( \"drop operation has been set already\" ) # add surrounding quotes _cols = [ f '\" { c } \"' for c in cols ] self . expressions [ \"drop\" ] = f \" |> drop(columns: [ { ',' . join ( _cols ) } ])\" return self","title":"drop()"},{"location":"api/dal/flux/#omicron.dal.influx.flux.Flux.drop_sys_cols","text":"use this to drop [\"_start\", \"_stop\", \"_measurement\"], plus columns specified in cols , before return query result please be noticed, after drop sys columns, there's still two sys columns left, which is \"_time\" and \"table\", and \"_time\" should usually be kept, \"table\" is one we're not able to removed. If you don't like _time in return result, you can specify it in cols parameter. Parameters: Name Type Description Default cols the extra columns to be dropped None Returns: Type Description Flux Flux query object Source code in omicron/dal/influx/flux.py def drop_sys_cols ( self , cols : List [ str ] = None ) -> \"Flux\" : \"\"\"use this to drop [\"_start\", \"_stop\", \"_measurement\"], plus columns specified in `cols`, before return query result please be noticed, after drop sys columns, there's still two sys columns left, which is \"_time\" and \"table\", and \"_time\" should usually be kept, \"table\" is one we're not able to removed. If you don't like _time in return result, you can specify it in `cols` parameter. Args: cols : the extra columns to be dropped Returns: Flux query object \"\"\" _cols = [ \"_start\" , \"_stop\" , \"_measurement\" ] if cols is not None : _cols . extend ( cols ) return self . 
drop ( _cols )","title":"drop_sys_cols()"},{"location":"api/dal/flux/#omicron.dal.influx.flux.Flux.fields","text":"\u7ed9\u67e5\u8be2\u6dfb\u52a0field\u8fc7\u6ee4\u6761\u4ef6 \u6b64\u67e5\u8be2\u6761\u4ef6\u4e3a\u8fc7\u6ee4\u6761\u4ef6\uff0c\u7528\u4ee5\u6307\u5b9a\u54ea\u4e9bfield\u4f1a\u51fa\u73b0\u5728\u67e5\u8be2\u7ed3\u679c\u4e2d\uff0c\u5e76\u975e\u5fc5\u987b\u3002\u5982\u679c\u67e5\u8be2\u4e2d\u6ca1\u6709\u6307\u5b9atags\uff0c\u5219\u4f1a\u8fd4\u56de\u6240\u6709\u8bb0\u5f55\u3002 \u7531\u4e8e\u4e00\u6761\u8bb0\u5f55\u53ea\u80fd\u5c5e\u4e8e\u4e00\u4e2a_field\uff0c\u6240\u4ee5\uff0c\u5f53\u6307\u5b9a\u591a\u4e2a_field\u8fdb\u884c\u67e5\u8be2\u65f6\uff0c\u5b83\u4eec\u4e4b\u95f4\u7684\u5173\u7cfb\u5e94\u8be5\u4e3a or \u3002 Exceptions: Type Description DuplicateOperationError \u4e00\u4e2a\u67e5\u8be2\u4e2d\u53ea\u5141\u8bb8\u6267\u884c\u4e00\u6b21\uff0c\u5982\u679cfiled filter\u8868\u8fbe\u5f0f\u5df2\u7ecf\u5b58\u5728\uff0c\u5219\u629b\u51fa\u5f02\u5e38 Parameters: Name Type Description Default fields List \u5f85\u67e5\u8be2\u7684field\u5217\u8868 required reserve_time_stamp bool \u662f\u5426\u4fdd\u7559\u65f6\u95f4\u6233 _time \uff0c\u9ed8\u8ba4\u4e3aTrue True Returns: Type Description Flux Flux\u5bf9\u8c61\uff0c\u4ee5\u4fbf\u8fdb\u884c\u7ba1\u9053\u64cd\u4f5c Source code in omicron/dal/influx/flux.py def fields ( self , fields : List , reserve_time_stamp : bool = True ) -> \"Flux\" : \"\"\"\u7ed9\u67e5\u8be2\u6dfb\u52a0field\u8fc7\u6ee4\u6761\u4ef6 \u6b64\u67e5\u8be2\u6761\u4ef6\u4e3a\u8fc7\u6ee4\u6761\u4ef6\uff0c\u7528\u4ee5\u6307\u5b9a\u54ea\u4e9bfield\u4f1a\u51fa\u73b0\u5728\u67e5\u8be2\u7ed3\u679c\u4e2d\uff0c\u5e76\u975e\u5fc5\u987b\u3002\u5982\u679c\u67e5\u8be2\u4e2d\u6ca1\u6709\u6307\u5b9atags\uff0c\u5219\u4f1a\u8fd4\u56de\u6240\u6709\u8bb0\u5f55\u3002 \u7531\u4e8e\u4e00\u6761\u8bb0\u5f55\u53ea\u80fd\u5c5e\u4e8e\u4e00\u4e2a_field\uff0c\u6240\u4ee5\uff0c\u5f53\u6307\u5b9a\u591a\u4e2a_field\u8fdb\u884c\u67e5\u8be2\u65f6\uff0c\u5b83\u4eec\u4e4b\u95f4\u7684\u5173\u7cfb\u5e94\u8be5\u4e3a`or`\u3002 Raises: DuplicateOperationError: \u4e00\u4e2a\u67e5\u8be2\u4e2d\u53ea\u5141\u8bb8\u6267\u884c\u4e00\u6b21\uff0c\u5982\u679cfiled filter\u8868\u8fbe\u5f0f\u5df2\u7ecf\u5b58\u5728\uff0c\u5219\u629b\u51fa\u5f02\u5e38 Args: fields: \u5f85\u67e5\u8be2\u7684field\u5217\u8868 reserve_time_stamp: \u662f\u5426\u4fdd\u7559\u65f6\u95f4\u6233`_time`\uff0c\u9ed8\u8ba4\u4e3aTrue Returns: Flux\u5bf9\u8c61\uff0c\u4ee5\u4fbf\u8fdb\u884c\u7ba1\u9053\u64cd\u4f5c \"\"\" if \"fields\" in self . expressions : raise DuplicateOperationError ( \"fields has been set\" ) self . _cols = fields . copy () if reserve_time_stamp and \"_time\" not in self . _cols : self . _cols . append ( \"_time\" ) self . _cols = sorted ( self . _cols ) filters = [ f 'r[\"_field\"] == \" { name } \"' for name in self . _cols ] self . expressions [ \"fields\" ] = f \" |> filter(fn: (r) => { ' or ' . 
join ( filters ) } )\" return self","title":"fields()"},{"location":"api/dal/flux/#omicron.dal.influx.flux.Flux.format_time","text":"\u5c06\u65f6\u95f4\u8f6c\u6362\u6210\u5ba2\u6237\u7aef\u5bf9\u5e94\u7684\u7cbe\u5ea6\uff0c\u5e76\u4ee5 RFC3339 timestamps\u683c\u5f0f\u4e32\uff08\u5373influxdb\u8981\u6c42\u7684\u683c\u5f0f\uff09\u8fd4\u56de\u3002 \u5982\u679c\u8fd9\u4e2a\u65f6\u95f4\u662f\u4f5c\u4e3a\u67e5\u8be2\u7684range\u4e2d\u7684\u7ed3\u675f\u65f6\u95f4\u4f7f\u7528\u65f6\uff0c\u7531\u4e8einflux\u67e5\u8be2\u7684\u65f6\u95f4\u8303\u56f4\u662f\u5de6\u95ed\u53f3\u5f00\u7684\uff0c\u56e0\u6b64\u5982\u679c\u4f60\u9700\u8981\u67e5\u8be2\u7684\u662f\u4e00\u4e2a\u95ed\u533a\u95f4\uff0c\u5219\u9700\u8981\u5c06 end \u7684\u65f6\u95f4\u5411\u524d\u504f\u79fb\u4e00\u4e2a\u7cbe\u5ea6\u3002\u901a\u8fc7\u4f20\u5165 shift_forward = True \u53ef\u4ee5\u5b8c\u6210\u8fd9\u79cd\u8f6c\u6362\u3002 Examples: >>> # by default, the precision is seconds, and convert a date >>> Flux . format_time ( datetime . date ( 2019 , 1 , 1 )) '2019-01-01T00:00:00Z' >>> # set precision to ms, convert a time >>> Flux . format_time ( datetime . datetime ( 1978 , 7 , 8 , 12 , 34 , 56 , 123456 ), precision = \"ms\" ) '1978-07-08T12:34:56.123Z' >>> # convert and forward shift >>> Flux . format_time ( datetime . date ( 1978 , 7 , 8 ), shift_forward = True ) '1978-07-08T00:00:01Z' Parameters: Name Type Description Default tm \u5f85\u683c\u5f0f\u5316\u7684\u65f6\u95f4 required precision str \u65f6\u95f4\u7cbe\u5ea6\uff0c\u53ef\u9009\u503c\u4e3a\uff1a's', 'ms', 'us' 's' shift_forward \u5982\u679c\u4e3aTrue\uff0c\u5219\u5c06end\u5411\u524d\u504f\u79fb\u4e00\u4e2a\u7cbe\u5ea6 False Returns: Type Description str \u8c03\u6574\u540e\u7b26\u5408influx\u65f6\u95f4\u89c4\u8303\u7684\u65f6\u95f4\uff08\u5b57\u7b26\u4e32\u8868\u793a\uff09 Source code in omicron/dal/influx/flux.py @classmethod def format_time ( cls , tm : Frame , precision : str = \"s\" , shift_forward = False ) -> str : \"\"\"\u5c06\u65f6\u95f4\u8f6c\u6362\u6210\u5ba2\u6237\u7aef\u5bf9\u5e94\u7684\u7cbe\u5ea6\uff0c\u5e76\u4ee5 RFC3339 timestamps\u683c\u5f0f\u4e32\uff08\u5373influxdb\u8981\u6c42\u7684\u683c\u5f0f\uff09\u8fd4\u56de\u3002 \u5982\u679c\u8fd9\u4e2a\u65f6\u95f4\u662f\u4f5c\u4e3a\u67e5\u8be2\u7684range\u4e2d\u7684\u7ed3\u675f\u65f6\u95f4\u4f7f\u7528\u65f6\uff0c\u7531\u4e8einflux\u67e5\u8be2\u7684\u65f6\u95f4\u8303\u56f4\u662f\u5de6\u95ed\u53f3\u5f00\u7684\uff0c\u56e0\u6b64\u5982\u679c\u4f60\u9700\u8981\u67e5\u8be2\u7684\u662f\u4e00\u4e2a\u95ed\u533a\u95f4\uff0c\u5219\u9700\u8981\u5c06`end`\u7684\u65f6\u95f4\u5411\u524d\u504f\u79fb\u4e00\u4e2a\u7cbe\u5ea6\u3002\u901a\u8fc7\u4f20\u5165`shift_forward = True`\u53ef\u4ee5\u5b8c\u6210\u8fd9\u79cd\u8f6c\u6362\u3002 Examples: >>> # by default, the precision is seconds, and convert a date >>> Flux.format_time(datetime.date(2019, 1, 1)) '2019-01-01T00:00:00Z' >>> # set precision to ms, convert a time >>> Flux.format_time(datetime.datetime(1978, 7, 8, 12, 34, 56, 123456), precision=\"ms\") '1978-07-08T12:34:56.123Z' >>> # convert and forward shift >>> Flux.format_time(datetime.date(1978, 7, 8), shift_forward = True) '1978-07-08T00:00:01Z' Args: tm : \u5f85\u683c\u5f0f\u5316\u7684\u65f6\u95f4 precision: \u65f6\u95f4\u7cbe\u5ea6\uff0c\u53ef\u9009\u503c\u4e3a\uff1a's', 'ms', 'us' shift_forward: \u5982\u679c\u4e3aTrue\uff0c\u5219\u5c06end\u5411\u524d\u504f\u79fb\u4e00\u4e2a\u7cbe\u5ea6 Returns: \u8c03\u6574\u540e\u7b26\u5408influx\u65f6\u95f4\u89c4\u8303\u7684\u65f6\u95f4\uff08\u5b57\u7b26\u4e32\u8868\u793a\uff09 \"\"\" timespec = { 
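A sketch of the field filter; with the default `reserve_time_stamp=True`, `_time` is appended to the column list and the individual filters are or-ed, exactly as the source above shows. The field names are placeholders.

```python
from omicron.dal.influx.flux import Flux

flux = Flux()
# Keep only the "open" and "close" fields; "_time" is added by default and
# the column list is sorted before the filter expression is rendered.
flux.fields(["open", "close"])
print(flux.expressions["fields"])
# -> ' |> filter(fn: (r) => r["_field"] == "_time" or
#                r["_field"] == "close" or r["_field"] == "open")'
```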
\"s\" : \"seconds\" , \"ms\" : \"milliseconds\" , \"us\" : \"microseconds\" } . get ( precision ) if timespec is None : raise ValueError ( f \"precision must be one of 's', 'ms', 'us', but got { precision } \" ) tm = arrow . get ( tm ) . naive if shift_forward : tm = tm + datetime . timedelta ( ** { timespec : 1 }) return tm . isoformat ( sep = \"T\" , timespec = timespec ) + \"Z\"","title":"format_time()"},{"location":"api/dal/flux/#omicron.dal.influx.flux.Flux.group","text":"[summary] Returns: Type Description Flux [description] Source code in omicron/dal/influx/flux.py def group ( self , by : Tuple [ str ]) -> \"Flux\" : \"\"\"[summary] Returns: [description] \"\"\" if \"group\" in self . expressions : raise DuplicateOperationError ( \"group has been set\" ) if isinstance ( by , str ): by = [ by ] cols = \",\" . join ([ f '\" { col } \"' for col in by ]) self . expressions [ \"group\" ] = f \" |> group(columns: [ { cols } ])\" return self","title":"group()"},{"location":"api/dal/flux/#omicron.dal.influx.flux.Flux.latest","text":"\u83b7\u53d6\u6700\u540en\u6761\u6570\u636e\uff0c\u6309\u65f6\u95f4\u589e\u5e8f\u8fd4\u56de Flux\u67e5\u8be2\u7684\u589e\u5f3a\u529f\u80fd\uff0c\u76f8\u5f53\u4e8etop + sort + limit Parameters: Name Type Description Default n int \u6700\u540en\u6761\u6570\u636e required Returns: Type Description Flux Flux\u5bf9\u8c61\uff0c\u4ee5\u4fbf\u8fdb\u884c\u7ba1\u9053\u64cd\u4f5c Source code in omicron/dal/influx/flux.py def latest ( self , n : int ) -> \"Flux\" : \"\"\"\u83b7\u53d6\u6700\u540en\u6761\u6570\u636e\uff0c\u6309\u65f6\u95f4\u589e\u5e8f\u8fd4\u56de Flux\u67e5\u8be2\u7684\u589e\u5f3a\u529f\u80fd\uff0c\u76f8\u5f53\u4e8etop + sort + limit Args: n: \u6700\u540en\u6761\u6570\u636e Returns: Flux\u5bf9\u8c61\uff0c\u4ee5\u4fbf\u8fdb\u884c\u7ba1\u9053\u64cd\u4f5c \"\"\" assert \"top\" not in self . expressions , \"top and last_n can not be used together\" assert ( \"sort\" not in self . expressions ), \"sort and last_n can not be used together\" assert ( \"limit\" not in self . expressions ), \"limit and last_n can not be used together\" self . _last_n = n return self","title":"latest()"},{"location":"api/dal/flux/#omicron.dal.influx.flux.Flux.limit","text":"\u6dfb\u52a0\u8fd4\u56de\u8bb0\u5f55\u6570\u9650\u5236 Exceptions: Type Description DuplicateOperationError \u4e00\u4e2a\u67e5\u8be2\u4e2d\u53ea\u5141\u8bb8\u6307\u5b9a\u4e00\u6b21limit\uff0c\u5982\u679climit\u8868\u8fbe\u5f0f\u5df2\u7ecf\u5b58\u5728\uff0c\u5219\u629b\u51fa\u5f02\u5e38 Parameters: Name Type Description Default limit int \u8fd4\u56de\u8bb0\u5f55\u6570\u9650\u5236 required Returns: Type Description Flux Flux\u5bf9\u8c61\uff0c\u4ee5\u4fbf\u8fdb\u884c\u7ba1\u9053\u64cd\u4f5c Source code in omicron/dal/influx/flux.py def limit ( self , limit : int ) -> \"Flux\" : \"\"\"\u6dfb\u52a0\u8fd4\u56de\u8bb0\u5f55\u6570\u9650\u5236 Raises: DuplicateOperationError: \u4e00\u4e2a\u67e5\u8be2\u4e2d\u53ea\u5141\u8bb8\u6307\u5b9a\u4e00\u6b21limit\uff0c\u5982\u679climit\u8868\u8fbe\u5f0f\u5df2\u7ecf\u5b58\u5728\uff0c\u5219\u629b\u51fa\u5f02\u5e38 Args: limit: \u8fd4\u56de\u8bb0\u5f55\u6570\u9650\u5236 Returns: Flux\u5bf9\u8c61\uff0c\u4ee5\u4fbf\u8fdb\u884c\u7ba1\u9053\u64cd\u4f5c \"\"\" if \"limit\" in self . expressions : raise DuplicateOperationError ( \"limit has been set\" ) self . 
expressions [ \"limit\" ] = \" |> limit(n: %d )\" % limit return self","title":"limit()"},{"location":"api/dal/flux/#omicron.dal.influx.flux.Flux.measurement","text":"add measurement filter to query Exceptions: Type Description DuplicateOperationError \u4e00\u6b21\u67e5\u8be2\u4e2d\u53ea\u5141\u8bb8\u6307\u5b9a\u4e00\u4e2ameasurement, \u5982\u679c\u8868\u8fbe\u5f0f\u4e2d\u5df2\u7ecf\u5b58\u5728measurement, \u5219\u629b\u51fa\u5f02\u5e38 Returns: Type Description Flux Flux\u5bf9\u8c61\u81ea\u8eab\uff0c\u4ee5\u4fbf\u8fdb\u884c\u7ba1\u9053\u64cd\u4f5c Source code in omicron/dal/influx/flux.py def measurement ( self , measurement : str ) -> \"Flux\" : \"\"\"add measurement filter to query Raises: DuplicateOperationError: \u4e00\u6b21\u67e5\u8be2\u4e2d\u53ea\u5141\u8bb8\u6307\u5b9a\u4e00\u4e2ameasurement, \u5982\u679c\u8868\u8fbe\u5f0f\u4e2d\u5df2\u7ecf\u5b58\u5728measurement, \u5219\u629b\u51fa\u5f02\u5e38 Returns: Flux\u5bf9\u8c61\u81ea\u8eab\uff0c\u4ee5\u4fbf\u8fdb\u884c\u7ba1\u9053\u64cd\u4f5c \"\"\" if \"measurement\" in self . expressions : raise DuplicateOperationError ( \"measurement has been set\" ) self . expressions [ \"measurement\" ] = f ' |> filter(fn: (r) => r[\"_measurement\"] == \" { measurement } \")' return self","title":"measurement()"},{"location":"api/dal/flux/#omicron.dal.influx.flux.Flux.pivot","text":"pivot\u7528\u6765\u5c06\u4ee5\u5217\u4e3a\u5355\u4f4d\u7684\u6570\u636e\u8f6c\u6362\u4e3a\u4ee5\u884c\u4e3a\u5355\u4f4d\u7684\u6570\u636e Flux\u67e5\u8be2\u8fd4\u56de\u7684\u7ed3\u679c\u901a\u5e38\u90fd\u662f\u4ee5\u5217\u4e3a\u5355\u4f4d\u7684\u6570\u636e\uff0c\u589e\u52a0\u672cpivot\u6761\u4ef6\u540e\uff0c\u7ed3\u679c\u5c06\u88ab\u8f6c\u6362\u6210\u4e3a\u4ee5\u884c\u4e3a\u5355\u4f4d\u7684\u6570\u636e\u518d\u8fd4\u56de\u3002 \u8fd9\u91cc\u5b9e\u73b0\u7684\u662fmeasurement\u5185\u7684\u8f6c\u6362\uff0c\u8bf7\u53c2\u8003 pivot Parameters: Name Type Description Default row_keys List[str] \u60df\u4e00\u786e\u5b9a\u8f93\u51fa\u4e2d\u4e00\u884c\u6570\u636e\u7684\u5217\u540d\u5b57, \u9ed8\u8ba4\u4e3a[\"_time\"] ['_time'] column_keys \u5217\u540d\u79f0\u5217\u8868\uff0c\u9ed8\u8ba4\u4e3a[\"_field\"] ['_field'] value_column str \u503c\u5217\u540d\uff0c\u9ed8\u8ba4\u4e3a\"_value\" '_value' Returns: Type Description Flux Flux\u5bf9\u8c61\uff0c\u4ee5\u4fbf\u8fdb\u884c\u7ba1\u9053\u64cd\u4f5c Source code in omicron/dal/influx/flux.py def pivot ( self , row_keys : List [ str ] = [ \"_time\" ], column_keys = [ \"_field\" ], value_column : str = \"_value\" , ) -> \"Flux\" : \"\"\"pivot\u7528\u6765\u5c06\u4ee5\u5217\u4e3a\u5355\u4f4d\u7684\u6570\u636e\u8f6c\u6362\u4e3a\u4ee5\u884c\u4e3a\u5355\u4f4d\u7684\u6570\u636e Flux\u67e5\u8be2\u8fd4\u56de\u7684\u7ed3\u679c\u901a\u5e38\u90fd\u662f\u4ee5\u5217\u4e3a\u5355\u4f4d\u7684\u6570\u636e\uff0c\u589e\u52a0\u672cpivot\u6761\u4ef6\u540e\uff0c\u7ed3\u679c\u5c06\u88ab\u8f6c\u6362\u6210\u4e3a\u4ee5\u884c\u4e3a\u5355\u4f4d\u7684\u6570\u636e\u518d\u8fd4\u56de\u3002 \u8fd9\u91cc\u5b9e\u73b0\u7684\u662fmeasurement\u5185\u7684\u8f6c\u6362\uff0c\u8bf7\u53c2\u8003 [pivot](https://docs.influxdata.com/flux/v0.x/stdlib/universe/pivot/#align-fields-within-each-measurement-that-have-the-same-timestamp) Args: row_keys: \u60df\u4e00\u786e\u5b9a\u8f93\u51fa\u4e2d\u4e00\u884c\u6570\u636e\u7684\u5217\u540d\u5b57, \u9ed8\u8ba4\u4e3a[\"_time\"] column_keys: \u5217\u540d\u79f0\u5217\u8868\uff0c\u9ed8\u8ba4\u4e3a[\"_field\"] value_column: \u503c\u5217\u540d\uff0c\u9ed8\u8ba4\u4e3a\"_value\" Returns: Flux\u5bf9\u8c61\uff0c\u4ee5\u4fbf\u8fdb\u884c\u7ba1\u9053\u64cd\u4f5c 
\"\"\" if \"pivot\" in self . expressions : raise DuplicateOperationError ( \"pivot has been set\" ) columns = \",\" . join ([ f '\" { name } \"' for name in column_keys ]) rowkeys = \",\" . join ([ f '\" { name } \"' for name in row_keys ]) self . expressions [ \"pivot\" ] = f ' |> pivot(columnKey: [ { columns } ], rowKey: [ { rowkeys } ], valueColumn: \" { value_column } \")' return self","title":"pivot()"},{"location":"api/dal/flux/#omicron.dal.influx.flux.Flux.range","text":"\u6dfb\u52a0\u65f6\u95f4\u8303\u56f4\u8fc7\u6ee4 \u5fc5\u987b\u6307\u5b9a\u7684\u67e5\u8be2\u6761\u4ef6\uff0c\u5426\u5219influxdb\u4f1a\u62a5unbound\u67e5\u8be2\u9519\uff0c\u56e0\u4e3a\u8fd9\u79cd\u60c5\u51b5\u4e0b\uff0c\u8fd4\u56de\u7684\u6570\u636e\u91cf\u5c06\u975e\u5e38\u5927\u3002 \u5728\u683c\u5f0f\u5316\u65f6\u95f4\u65f6\uff0c\u9700\u8981\u6839\u636e precision \u751f\u6210\u65f6\u95f4\u5b57\u7b26\u4e32\u3002\u5728\u5411Influxdb\u53d1\u9001\u8bf7\u6c42\u65f6\uff0c\u5e94\u8be5\u6ce8\u610f\u67e5\u8be2\u53c2\u6570\u4e2d\u6307\u5b9a\u7684\u65f6\u95f4\u7cbe\u5ea6\u4e0e\u8fd9\u91cc\u4f7f\u7528\u7684\u4fdd\u6301\u4e00\u81f4\u3002 Influxdb\u7684\u67e5\u8be2\u7ed3\u679c\u9ed8\u8ba4\u4e0d\u5305\u542b\u7ed3\u675f\u65f6\u95f4\uff0c\u5f53 right_close \u6307\u5b9a\u4e3aTrue\u65f6\uff0c\u6211\u4eec\u5c06\u6839\u636e\u6307\u5b9a\u7684\u7cbe\u5ea6\u4fee\u6539 end \u65f6\u95f4\uff0c\u4f7f\u4e4b\u4ec5\u6bd4 end \u591a\u4e00\u4e2a\u65f6\u95f4\u5355\u4f4d\uff0c\u4ece\u800c\u4fdd\u8bc1\u67e5\u8be2\u7ed3\u679c\u4f1a\u5305\u542b end \u3002 Exceptions: Type Description DuplicateOperationError \u4e00\u4e2a\u67e5\u8be2\u4e2d\u53ea\u5141\u8bb8\u6307\u5b9a\u4e00\u6b21\u65f6\u95f4\u8303\u56f4\uff0c\u5982\u679crange\u8868\u8fbe\u5f0f\u5df2\u7ecf\u5b58\u5728\uff0c\u5219\u629b\u51fa\u5f02\u5e38 Parameters: Name Type Description Default start Union[datetime.date, datetime.datetime] \u5f00\u59cb\u65f6\u95f4 required end Union[datetime.date, datetime.datetime] \u7ed3\u675f\u65f6\u95f4 required right_close \u67e5\u8be2\u7ed3\u679c\u662f\u5426\u5305\u542b\u7ed3\u675f\u65f6\u95f4\u3002 True precision \u65f6\u95f4\u7cbe\u5ea6\uff0c\u9ed8\u8ba4\u4e3a\u79d2\u3002 's' Returns: Type Description Flux Flux\u5bf9\u8c61\uff0c\u4ee5\u652f\u6301\u7ba1\u9053\u64cd\u4f5c Source code in omicron/dal/influx/flux.py def range ( self , start : Frame , end : Frame , right_close = True , precision = \"s\" ) -> \"Flux\" : \"\"\"\u6dfb\u52a0\u65f6\u95f4\u8303\u56f4\u8fc7\u6ee4 \u5fc5\u987b\u6307\u5b9a\u7684\u67e5\u8be2\u6761\u4ef6\uff0c\u5426\u5219influxdb\u4f1a\u62a5unbound\u67e5\u8be2\u9519\uff0c\u56e0\u4e3a\u8fd9\u79cd\u60c5\u51b5\u4e0b\uff0c\u8fd4\u56de\u7684\u6570\u636e\u91cf\u5c06\u975e\u5e38\u5927\u3002 \u5728\u683c\u5f0f\u5316\u65f6\u95f4\u65f6\uff0c\u9700\u8981\u6839\u636e`precision`\u751f\u6210\u65f6\u95f4\u5b57\u7b26\u4e32\u3002\u5728\u5411Influxdb\u53d1\u9001\u8bf7\u6c42\u65f6\uff0c\u5e94\u8be5\u6ce8\u610f\u67e5\u8be2\u53c2\u6570\u4e2d\u6307\u5b9a\u7684\u65f6\u95f4\u7cbe\u5ea6\u4e0e\u8fd9\u91cc\u4f7f\u7528\u7684\u4fdd\u6301\u4e00\u81f4\u3002 Influxdb\u7684\u67e5\u8be2\u7ed3\u679c\u9ed8\u8ba4\u4e0d\u5305\u542b\u7ed3\u675f\u65f6\u95f4\uff0c\u5f53`right_close`\u6307\u5b9a\u4e3aTrue\u65f6\uff0c\u6211\u4eec\u5c06\u6839\u636e\u6307\u5b9a\u7684\u7cbe\u5ea6\u4fee\u6539`end`\u65f6\u95f4\uff0c\u4f7f\u4e4b\u4ec5\u6bd4`end`\u591a\u4e00\u4e2a\u65f6\u95f4\u5355\u4f4d\uff0c\u4ece\u800c\u4fdd\u8bc1\u67e5\u8be2\u7ed3\u679c\u4f1a\u5305\u542b`end`\u3002 Raises: DuplicateOperationError: 
\u4e00\u4e2a\u67e5\u8be2\u4e2d\u53ea\u5141\u8bb8\u6307\u5b9a\u4e00\u6b21\u65f6\u95f4\u8303\u56f4\uff0c\u5982\u679crange\u8868\u8fbe\u5f0f\u5df2\u7ecf\u5b58\u5728\uff0c\u5219\u629b\u51fa\u5f02\u5e38 Args: start: \u5f00\u59cb\u65f6\u95f4 end: \u7ed3\u675f\u65f6\u95f4 right_close: \u67e5\u8be2\u7ed3\u679c\u662f\u5426\u5305\u542b\u7ed3\u675f\u65f6\u95f4\u3002 precision: \u65f6\u95f4\u7cbe\u5ea6\uff0c\u9ed8\u8ba4\u4e3a\u79d2\u3002 Returns: Flux\u5bf9\u8c61\uff0c\u4ee5\u652f\u6301\u7ba1\u9053\u64cd\u4f5c \"\"\" if \"range\" in self . expressions : raise DuplicateOperationError ( \"range has been set\" ) if precision not in [ \"s\" , \"ms\" , \"us\" ]: raise AssertionError ( \"precision must be 's', 'ms' or 'us'\" ) end = self . format_time ( end , precision , right_close ) start = self . format_time ( start , precision ) self . expressions [ \"range\" ] = f \" |> range(start: { start } , stop: { end } )\" return self","title":"range()"},{"location":"api/dal/flux/#omicron.dal.influx.flux.Flux.sort","text":"\u6309\u7167\u6307\u5b9a\u7684\u5217\u8fdb\u884c\u6392\u5e8f \u6839\u636e influxdb doc , \u67e5\u8be2\u8fd4\u56de\u503c\u9ed8\u8ba4\u5730\u6309\u65f6\u95f4\u6392\u5e8f\u3002\u56e0\u6b64\uff0c\u5982\u679c\u4ec5\u4ec5\u662f\u8981\u6c42\u67e5\u8be2\u7ed3\u679c\u6309\u65f6\u95f4\u6392\u5e8f\uff0c\u65e0\u987b\u8c03\u7528\u6b64API\uff0c\u4f46\u662f\uff0c\u6b64API\u63d0\u4f9b\u4e86\u6309\u5176\u5b83\u5b57\u6bb5\u6392\u5e8f\u7684\u80fd\u529b\u3002 \u53e6\u5916\uff0c\u5728\u4e00\u4e2a\u67095000\u591a\u4e2atag\uff0c\u5171\u8fd4\u56de1M\u6761\u8bb0\u5f55\u7684\u6d4b\u8bd5\u4e2d\uff0c\u6d4b\u8bd5\u9a8c\u8bc1\u8fd4\u56de\u8bb0\u5f55\u786e\u5b9e\u6309_time\u5347\u5e8f\u6392\u5217\u3002 Parameters: Name Type Description Default by List[str] \u6307\u5b9a\u6392\u5e8f\u7684\u5217\u540d\u79f0\u5217\u8868 None Returns: Type Description Flux Flux\u5bf9\u8c61\uff0c\u4ee5\u4fbf\u8fdb\u884c\u7ba1\u9053\u64cd\u4f5c Source code in omicron/dal/influx/flux.py def sort ( self , by : List [ str ] = None , desc : bool = False ) -> \"Flux\" : \"\"\"\u6309\u7167\u6307\u5b9a\u7684\u5217\u8fdb\u884c\u6392\u5e8f \u6839\u636e[influxdb doc](https://docs.influxdata.com/influxdb/v2.0/query-data/flux/first-last/), \u67e5\u8be2\u8fd4\u56de\u503c\u9ed8\u8ba4\u5730\u6309\u65f6\u95f4\u6392\u5e8f\u3002\u56e0\u6b64\uff0c\u5982\u679c\u4ec5\u4ec5\u662f\u8981\u6c42\u67e5\u8be2\u7ed3\u679c\u6309\u65f6\u95f4\u6392\u5e8f\uff0c\u65e0\u987b\u8c03\u7528\u6b64API\uff0c\u4f46\u662f\uff0c\u6b64API\u63d0\u4f9b\u4e86\u6309\u5176\u5b83\u5b57\u6bb5\u6392\u5e8f\u7684\u80fd\u529b\u3002 \u53e6\u5916\uff0c\u5728\u4e00\u4e2a\u67095000\u591a\u4e2atag\uff0c\u5171\u8fd4\u56de1M\u6761\u8bb0\u5f55\u7684\u6d4b\u8bd5\u4e2d\uff0c\u6d4b\u8bd5\u9a8c\u8bc1\u8fd4\u56de\u8bb0\u5f55\u786e\u5b9e\u6309_time\u5347\u5e8f\u6392\u5217\u3002 Args: by: \u6307\u5b9a\u6392\u5e8f\u7684\u5217\u540d\u79f0\u5217\u8868 Returns: Flux\u5bf9\u8c61\uff0c\u4ee5\u4fbf\u8fdb\u884c\u7ba1\u9053\u64cd\u4f5c \"\"\" if \"sort\" in self . expressions : raise DuplicateOperationError ( \"sort has been set\" ) if by is None : by = [ \"_value\" ] if isinstance ( by , str ): by = [ by ] columns_ = \",\" . join ([ f '\" { name } \"' for name in by ]) desc = \"true\" if desc else \"false\" self . 
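A sketch of a closed-interval range query; the dates are placeholders.

```python
import datetime

from omicron.dal.influx.flux import Flux

flux = Flux()
# InfluxDB treats the stop of a range as exclusive; with right_close=True
# the stop time is shifted forward by one `precision` unit so that records
# stamped exactly at the end date are included.
flux.range(
    datetime.date(2022, 1, 1),
    datetime.date(2022, 12, 31),
    right_close=True,
    precision="s",
)
print(flux.expressions["range"])
# -> ' |> range(start: 2022-01-01T00:00:00Z, stop: 2022-12-31T00:00:01Z)'
```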
expressions [ \"sort\" ] = f \" |> sort(columns: [ { columns_ } ], desc: { desc } )\" return self","title":"sort()"},{"location":"api/dal/flux/#omicron.dal.influx.flux.Flux.tags","text":"\u7ed9\u67e5\u8be2\u6dfb\u52a0tags\u8fc7\u6ee4\u6761\u4ef6 \u6b64\u67e5\u8be2\u6761\u4ef6\u4e3a\u8fc7\u6ee4\u6761\u4ef6\uff0c\u5e76\u975e\u5fc5\u987b\u3002\u5982\u679c\u67e5\u8be2\u4e2d\u6ca1\u6709\u6307\u5b9atags\uff0c\u5219\u4f1a\u8fd4\u56de\u6240\u6709\u8bb0\u5f55\u3002 \u5728\u5b9e\u73b0\u4e0a\uff0c\u65e2\u53ef\u4ee5\u4f7f\u7528 contains \u8bed\u6cd5\uff0c\u4e5f\u53ef\u4ee5\u4f7f\u7528 or \u8bed\u6cd5(\u7531\u4e8e\u4e00\u6761\u8bb0\u5f55\u53ea\u80fd\u5c5e\u4e8e\u4e00\u4e2atag\uff0c\u6240\u4ee5\uff0c\u5f53\u6307\u5b9a\u591a\u4e2atag\u8fdb\u884c\u67e5\u8be2\u65f6\uff0c\u5b83\u4eec\u4e4b\u95f4\u7684\u5173\u7cfb\u5e94\u8be5\u4e3a or )\u3002\u7ecf\u9a8c\u8bc1\uff0ccontains\u8bed\u6cd5\u4f1a\u59cb\u7ec8\u5148\u5c06\u6240\u6709\u7b26\u5408\u6761\u4ef6\u7684\u8bb0\u5f55\u68c0\u7d22\u51fa\u6765\uff0c\u518d\u8fdb\u884c\u8fc7\u6ee4\u3002\u8fd9\u6837\u7684\u6548\u7387\u6bd4\u8f83\u4f4e\uff0c\u7279\u522b\u662f\u5f53tags\u7684\u6570\u91cf\u8f83\u5c11\u65f6\uff0c\u4f1a\u8fdc\u8fdc\u6bd4\u4f7f\u7528or\u8bed\u6cd5\u6162\u3002 Exceptions: Type Description DuplicateOperationError \u4e00\u4e2a\u67e5\u8be2\u4e2d\u53ea\u5141\u8bb8\u6267\u884c\u4e00\u6b21\uff0c\u5982\u679ctag filter\u8868\u8fbe\u5f0f\u5df2\u7ecf\u5b58\u5728\uff0c\u5219\u629b\u51fa\u5f02\u5e38 Parameters: Name Type Description Default tags tags\u662f\u4e00\u4e2a{tagname: Union[str,[tag_values]]}\u5bf9\u8c61\u3002 required Examples: >>> flux = Flux () >>> flux . tags ({ \"code\" : [ \"000001\" , \"000002\" ], \"name\" : [ \"\u6d66\u53d1\u94f6\u884c\" ]}) . expressions [ \"tags\" ] ' |> filter(fn: (r) => r[\"code\"] == \"000001\" or r[\"code\"] == \"000002\" or r[\"name\"] == \"\u6d66\u53d1\u94f6\u884c\")' Returns: Type Description Flux Flux\u5bf9\u8c61\uff0c\u4ee5\u4fbf\u8fdb\u884c\u7ba1\u9053\u64cd\u4f5c Source code in omicron/dal/influx/flux.py def tags ( self , tags : DefaultDict [ str , List [ str ]]) -> \"Flux\" : \"\"\"\u7ed9\u67e5\u8be2\u6dfb\u52a0tags\u8fc7\u6ee4\u6761\u4ef6 \u6b64\u67e5\u8be2\u6761\u4ef6\u4e3a\u8fc7\u6ee4\u6761\u4ef6\uff0c\u5e76\u975e\u5fc5\u987b\u3002\u5982\u679c\u67e5\u8be2\u4e2d\u6ca1\u6709\u6307\u5b9atags\uff0c\u5219\u4f1a\u8fd4\u56de\u6240\u6709\u8bb0\u5f55\u3002 \u5728\u5b9e\u73b0\u4e0a\uff0c\u65e2\u53ef\u4ee5\u4f7f\u7528`contains`\u8bed\u6cd5\uff0c\u4e5f\u53ef\u4ee5\u4f7f\u7528`or`\u8bed\u6cd5(\u7531\u4e8e\u4e00\u6761\u8bb0\u5f55\u53ea\u80fd\u5c5e\u4e8e\u4e00\u4e2atag\uff0c\u6240\u4ee5\uff0c\u5f53\u6307\u5b9a\u591a\u4e2atag\u8fdb\u884c\u67e5\u8be2\u65f6\uff0c\u5b83\u4eec\u4e4b\u95f4\u7684\u5173\u7cfb\u5e94\u8be5\u4e3a`or`)\u3002\u7ecf\u9a8c\u8bc1\uff0ccontains\u8bed\u6cd5\u4f1a\u59cb\u7ec8\u5148\u5c06\u6240\u6709\u7b26\u5408\u6761\u4ef6\u7684\u8bb0\u5f55\u68c0\u7d22\u51fa\u6765\uff0c\u518d\u8fdb\u884c\u8fc7\u6ee4\u3002\u8fd9\u6837\u7684\u6548\u7387\u6bd4\u8f83\u4f4e\uff0c\u7279\u522b\u662f\u5f53tags\u7684\u6570\u91cf\u8f83\u5c11\u65f6\uff0c\u4f1a\u8fdc\u8fdc\u6bd4\u4f7f\u7528or\u8bed\u6cd5\u6162\u3002 Raises: DuplicateOperationError: \u4e00\u4e2a\u67e5\u8be2\u4e2d\u53ea\u5141\u8bb8\u6267\u884c\u4e00\u6b21\uff0c\u5982\u679ctag filter\u8868\u8fbe\u5f0f\u5df2\u7ecf\u5b58\u5728\uff0c\u5219\u629b\u51fa\u5f02\u5e38 Args: tags : tags\u662f\u4e00\u4e2a{tagname: Union[str,[tag_values]]}\u5bf9\u8c61\u3002 Examples: >>> flux = Flux() >>> flux.tags({\"code\": [\"000001\", \"000002\"], \"name\": 
[\"\u6d66\u53d1\u94f6\u884c\"]}).expressions[\"tags\"] ' |> filter(fn: (r) => r[\"code\"] == \"000001\" or r[\"code\"] == \"000002\" or r[\"name\"] == \"\u6d66\u53d1\u94f6\u884c\")' Returns: Flux\u5bf9\u8c61\uff0c\u4ee5\u4fbf\u8fdb\u884c\u7ba1\u9053\u64cd\u4f5c \"\"\" if \"tags\" in self . expressions : raise DuplicateOperationError ( \"tags has been set\" ) filters = [] for tag , values in tags . items (): assert ( isinstance ( values , str ) or len ( values ) > 0 ), f \"tag { tag } should not be empty or None\" if isinstance ( values , str ): values = [ values ] for v in values : filters . append ( f 'r[\" { tag } \"] == \" { v } \"' ) op_expression = \" or \" . join ( filters ) self . expressions [ \"tags\" ] = f \" |> filter(fn: (r) => { op_expression } )\" return self","title":"tags()"},{"location":"api/dal/flux/#omicron.dal.influx.flux.Flux.to_timestamp","text":"\u5c06\u65f6\u95f4\u6839\u636e\u7cbe\u5ea6\u8f6c\u6362\u4e3aunix\u65f6\u95f4\u6233 \u5728\u5f80influxdb\u5199\u5165\u6570\u636e\u65f6\uff0cline-protocol\u8981\u6c42\u7684\u65f6\u95f4\u6233\u4e3aunix timestamp\uff0c\u5e76\u4e14\u4e0e\u5176\u7cbe\u5ea6\u5bf9\u5e94\u3002 influxdb\u59cb\u7ec8\u4f7f\u7528UTC\u65f6\u95f4\uff0c\u56e0\u6b64\uff0c tm \u4e5f\u5fc5\u987b\u5df2\u7ecf\u8f6c\u6362\u6210UTC\u65f6\u95f4\u3002 Parameters: Name Type Description Default tm Union[datetime.date, datetime.datetime] \u65f6\u95f4 required precision str \u65f6\u95f4\u7cbe\u5ea6\uff0c\u9ed8\u8ba4\u4e3a\u79d2\u3002 's' Returns: Type Description int \u65f6\u95f4\u6233 Source code in omicron/dal/influx/flux.py @classmethod def to_timestamp ( cls , tm : Frame , precision : str = \"s\" ) -> int : \"\"\"\u5c06\u65f6\u95f4\u6839\u636e\u7cbe\u5ea6\u8f6c\u6362\u4e3aunix\u65f6\u95f4\u6233 \u5728\u5f80influxdb\u5199\u5165\u6570\u636e\u65f6\uff0cline-protocol\u8981\u6c42\u7684\u65f6\u95f4\u6233\u4e3aunix timestamp\uff0c\u5e76\u4e14\u4e0e\u5176\u7cbe\u5ea6\u5bf9\u5e94\u3002 influxdb\u59cb\u7ec8\u4f7f\u7528UTC\u65f6\u95f4\uff0c\u56e0\u6b64\uff0c`tm`\u4e5f\u5fc5\u987b\u5df2\u7ecf\u8f6c\u6362\u6210UTC\u65f6\u95f4\u3002 Args: tm: \u65f6\u95f4 precision: \u65f6\u95f4\u7cbe\u5ea6\uff0c\u9ed8\u8ba4\u4e3a\u79d2\u3002 Returns: \u65f6\u95f4\u6233 \"\"\" if precision not in [ \"s\" , \"ms\" , \"us\" ]: raise AssertionError ( \"precision must be 's', 'ms' or 'us'\" ) # get int repr of tm, in seconds unit if isinstance ( tm , np . datetime64 ): tm = tm . astype ( \"datetime64[s]\" ) . astype ( \"int\" ) elif isinstance ( tm , datetime . datetime ): tm = tm . timestamp () else : tm = arrow . get ( tm ) . timestamp () return int ( tm * 10 ** ({ \"s\" : 0 , \"ms\" : 3 , \"us\" : 6 }[ precision ]))","title":"to_timestamp()"},{"location":"api/dal/influxclient/","text":"InfluxClient - the performanct async client for influxdb \u00b6 Source code in omicron/dal/influx/influxclient.py class InfluxClient : def __init__ ( self , url : str , token : str , bucket : str , org : str = None , enable_compress = False , chunk_size : int = 5000 , precision : str = \"s\" , ): \"\"\"[summary] Args: url ([type]): [description] token ([type]): [description] bucket ([type]): [description] org ([type], optional): [description]. Defaults to None. enable_compress ([type], optional): [description]. Defaults to False. chunk_size: number of lines to be saved in one request precision: \u652f\u6301\u7684\u65f6\u95f4\u7cbe\u5ea6 \"\"\" self . _url = url self . _bucket = bucket self . _enable_compress = enable_compress self . _org = org self . 
_org_id = None # \u9700\u8981\u65f6\u901a\u8fc7\u67e5\u8be2\u83b7\u53d6\uff0c\u6b64\u540e\u4e0d\u518d\u66f4\u65b0 self . _token = token # influxdb 2.0\u8d77\u652f\u6301\u7684\u65f6\u95f4\u7cbe\u5ea6\u6709\uff1ans, us, ms, s\u3002\u672c\u5ba2\u6237\u7aef\u53ea\u652f\u6301s, ms\u548cus self . _precision = precision . lower () if self . _precision not in [ \"s\" , \"ms\" , \"us\" ]: # pragma: no cover raise ValueError ( \"precision must be one of ['s', 'ms', 'us']\" ) self . _chunk_size = chunk_size # write self . _write_url = f \" { self . _url } /api/v2/write?org= { self . _org } &bucket= { self . _bucket } &precision= { self . _precision } \" self . _write_headers = { \"Content-Type\" : \"text/plain; charset=utf-8\" , \"Authorization\" : f \"Token { token } \" , \"Accept\" : \"application/json\" , } if self . _enable_compress : self . _write_headers [ \"Content-Encoding\" ] = \"gzip\" self . _query_url = f \" { self . _url } /api/v2/query?org= { self . _org } \" self . _query_headers = { \"Authorization\" : f \"Token { token } \" , \"Content-Type\" : \"application/vnd.flux\" , # influx\u67e5\u8be2\u7ed3\u679c\u683c\u5f0f\uff0c\u65e0\u8bba\u5982\u4f55\u6307\u5b9a\uff08\u6216\u8005\u4e0d\u6307\u5b9a\uff09\uff0c\u57282.1\u4e2d\u59cb\u7ec8\u662fcsv\u683c\u5f0f \"Accept\" : \"text/csv\" , } if self . _enable_compress : self . _query_headers [ \"Accept-Encoding\" ] = \"gzip\" self . _delete_url = ( f \" { self . _url } /api/v2/delete?org= { self . _org } &bucket= { self . _bucket } \" ) self . _delete_headers = { \"Authorization\" : f \"Token { token } \" , \"Content-Type\" : \"application/json\" , } async def save ( self , data : Union [ np . ndarray , DataFrame ], measurement : str = None , tag_keys : List [ str ] = [], time_key : str = None , global_tags : Dict = {}, chunk_size : int = None , ) -> None : \"\"\"save `data` into influxdb if `data` is a pandas.DataFrame or numy structured array, it will be converted to line protocol and saved. If `data` is str, use `write` method instead. Args: data: data to be saved measurement: the name of measurement tag_keys: which columns name will be used as tags chunk_size: number of lines to be saved in one request. if it's -1, then all data will be written in one request. If it's None, then it will be set to `self._chunk_size` Raises: InfluxDBWriteError: if write failed \"\"\" # todo: add more errors raise if isinstance ( data , DataFrame ): assert ( measurement is not None ), \"measurement must be specified when data is a DataFrame\" if tag_keys : assert set ( tag_keys ) in set ( data . columns . tolist () ), \"tag_keys must be in data.columns\" serializer = DataframeSerializer ( data , measurement , time_key , tag_keys , global_tags , precision = self . _precision , ) if chunk_size == - 1 : chunk_size = len ( data ) for lines in serializer . serialize ( chunk_size or self . _chunk_size ): await self . write ( lines ) elif isinstance ( data , np . ndarray ): assert ( measurement is not None ), \"measurement must be specified when data is a numpy array\" assert ( time_key is not None ), \"time_key must be specified when data is a numpy array\" serializer = NumpySerializer ( data , measurement , time_key , tag_keys , global_tags , time_precision = self . _precision , ) if chunk_size == - 1 : chunk_size = len ( data ) for lines in serializer . serialize ( chunk_size or self . _chunk_size ): await self . 
write ( lines ) else : raise TypeError ( f \"data must be pandas.DataFrame, numpy array, got { type ( data ) } \" ) async def write ( self , line_protocol : str ): \"\"\"\u5c06line-protocol\u6570\u7ec4\u5199\u5165influxdb Args: line_protocol: \u5f85\u5199\u5165\u7684\u6570\u636e\uff0c\u4ee5line-protocol\u6570\u7ec4\u5f62\u5f0f\u5b58\u5728 \"\"\" # todo: add raise error declaration if self . _enable_compress : line_protocol_ = gzip . compress ( line_protocol . encode ( \"utf-8\" )) else : line_protocol_ = line_protocol async with ClientSession () as session : async with session . post ( self . _write_url , data = line_protocol_ , headers = self . _write_headers ) as resp : if resp . status != 204 : err = await resp . json () logger . warning ( \"influxdb write error when processing: %s , err code: %s , message: %s \" , { line_protocol [: 100 ]}, err [ \"code\" ], err [ \"message\" ], ) logger . debug ( \"data caused error: %s \" , line_protocol ) raise InfluxDBWriteError ( f \"influxdb write failed, err: { err [ 'message' ] } \" ) async def query ( self , flux : Union [ Flux , str ], deserializer : Callable = None ) -> Any : \"\"\"flux\u67e5\u8be2 flux\u67e5\u8be2\u7ed3\u679c\u662f\u4e00\u4e2a\u4ee5annotated csv\u683c\u5f0f\u5b58\u50a8\u7684\u6570\u636e\uff0c\u4f8b\u5982\uff1a ``` ,result,table,_time,code,amount,close,factor,high,low,open,volume ,_result,0,2019-01-01T00:00:00Z,000001.XSHE,100000000,5.15,1.23,5.2,5,5.1,1000000 ``` \u4e0a\u8ff0`result`\u4e2d\uff0c\u4e8b\u5148\u901a\u8fc7Flux.keep()\u9650\u5236\u4e86\u8fd4\u56de\u7684\u5b57\u6bb5\u4e3a_time,code,amount,close,factor,high,low,open,volume\u3002influxdb\u67e5\u8be2\u8fd4\u56de\u7ed3\u679c\u65f6\uff0c\u603b\u662f\u6309\u7167\u5b57\u6bb5\u540d\u79f0\u5347\u5e8f\u6392\u5217\u3002\u6b64\u5916\uff0c\u603b\u662f\u4f1a\u989d\u5916\u5730\u8fd4\u56de_result, table\u4e24\u4e2a\u5b57\u6bb5\u3002 \u5982\u679c\u4f20\u5165\u4e86deserializer\uff0c\u5219\u4f1a\u8c03\u7528deserializer\u5c06\u5176\u89e3\u6790\u6210\u4e3apython\u5bf9\u8c61\u3002\u5426\u5219\uff0c\u8fd4\u56debytes\u6570\u636e\u3002 Args: flux: flux\u67e5\u8be2\u8bed\u53e5 deserializer: \u53cd\u5e8f\u5217\u5316\u51fd\u6570 Returns: \u5982\u679c\u672a\u63d0\u4f9b\u53cd\u5e8f\u5217\u5316\u51fd\u6570\uff0c\u5219\u8fd4\u56de\u7ed3\u679c\u4e3abytes array(\u5982\u679c\u6307\u5b9a\u4e86compress=True\uff0c\u8fd4\u56de\u7ed3\u679c\u4e3agzip\u89e3\u538b\u7f29\u540e\u7684bytes array)\uff0c\u5426\u5219\u8fd4\u56de\u53cd\u5e8f\u5217\u5316\u540e\u7684python\u5bf9\u8c61 \"\"\" if isinstance ( flux , Flux ): flux = str ( flux ) async with ClientSession () as session : async with session . post ( self . _query_url , data = flux , headers = self . _query_headers ) as resp : if resp . status != 200 : err = await resp . json () logger . warning ( f \"influxdb query error: { err } when processing { flux [: 500 ] } \" ) logger . debug ( \"data caused error: %s \" , flux ) raise InfluxDBQueryError ( f \"influxdb query failed, status code: { err [ 'message' ] } \" ) else : # auto-unzip body = await resp . read () if deserializer : try : return deserializer ( body ) except Exception as e : logger . exception ( e ) logger . 
warning ( \"failed to deserialize data: %s , the query is: %s \" , body , flux [: 500 ], ) raise else : return body async def drop_measurement ( self , measurement : str ): \"\"\"\u4eceinfluxdb\u4e2d\u5220\u9664\u4e00\u4e2ameasurement \u8c03\u7528\u6b64\u65b9\u6cd5\u540e\uff0c\u5b9e\u9645\u4e0a\u8be5measurement\u4ecd\u7136\u5b58\u5728\uff0c\u53ea\u662f\u6ca1\u6709\u6570\u636e\u3002 \"\"\" # todo: add raise error declaration await self . delete ( measurement , arrow . now () . naive ) async def delete ( self , measurement : str , stop : datetime . datetime , tags : Optional [ Dict [ str , str ]] = {}, start : datetime . datetime = None , precision : str = \"s\" , ): \"\"\"\u5220\u9664influxdb\u4e2d\u6307\u5b9a\u65f6\u95f4\u6bb5\u5185\u7684\u6570\u636e \u5173\u4e8e\u53c2\u6570\uff0c\u8bf7\u53c2\u89c1[Flux.delete][omicron.dal.influx.flux.Flux.delete]\u3002 Args: measurement: \u6307\u5b9ameasurement\u540d\u5b57 stop: \u5f85\u5220\u9664\u8bb0\u5f55\u7684\u7ed3\u675f\u65f6\u95f4 start: \u5f85\u5220\u9664\u8bb0\u5f55\u7684\u5f00\u59cb\u65f6\u95f4\uff0c\u5982\u679c\u672a\u6307\u5b9a\uff0c\u5219\u4f7f\u7528EPOCH_START tags: \u6309tag\u8fdb\u884c\u8fc7\u6ee4\u7684\u6761\u4ef6 precision: \u7528\u4ee5\u683c\u5f0f\u5316\u8d77\u59cb\u548c\u7ed3\u675f\u65f6\u95f4\u3002 Raises: InfluxDeleteError: \u5982\u679c\u5220\u9664\u5931\u8d25\uff0c\u5219\u629b\u51fa\u6b64\u5f02\u5e38 \"\"\" # todo: add raise error declaration command = Flux () . delete ( measurement , stop , tags , start = start , precision = precision ) async with ClientSession () as session : async with session . post ( self . _delete_url , data = json . dumps ( command ), headers = self . _delete_headers ) as resp : if resp . status != 204 : err = await resp . json () logger . warning ( \"influxdb delete error: %s when processin command %s \" , err [ \"message\" ], command , ) raise InfluxDeleteError ( f \"influxdb delete failed, status code: { err [ 'message' ] } \" ) async def list_buckets ( self ) -> List [ Dict ]: \"\"\"\u5217\u51fainfluxdb\u4e2d\u5bf9\u5e94token\u80fd\u770b\u5230\u7684\u6240\u6709\u7684bucket Returns: list of buckets, each bucket is a dict with keys: ``` id orgID, a 16 bytes hex string type, system or user description name retentionRules createdAt updatedAt links labels ``` \"\"\" url = f \" { self . _url } /api/v2/buckets\" headers = { \"Authorization\" : f \"Token { self . _token } \" } async with ClientSession () as session : async with session . get ( url , headers = headers ) as resp : if resp . status != 200 : err = await resp . json () raise InfluxSchemaError ( f \"influxdb list bucket failed, status code: { err [ 'message' ] } \" ) else : return ( await resp . json ())[ \"buckets\" ] async def delete_bucket ( self , bucket_id : str = None ): \"\"\"\u5220\u9664influxdb\u4e2d\u6307\u5b9abucket Args: bucket_id: \u6307\u5b9abucket\u7684id\u3002\u5982\u679c\u4e3aNone\uff0c\u5219\u4f1a\u5220\u9664\u672cclient\u5bf9\u5e94\u7684bucket\u3002 \"\"\" if bucket_id is None : buckets = await self . list_buckets () for bucket in buckets : if bucket [ \"type\" ] == \"user\" and bucket [ \"name\" ] == self . _bucket : bucket_id = bucket [ \"id\" ] break else : raise BadParameterError ( \"bucket_id is None, and we can't find bucket with name: %s \" % self . _bucket ) url = f \" { self . _url } /api/v2/buckets/ { bucket_id } \" headers = { \"Authorization\" : f \"Token { self . _token } \" } async with ClientSession () as session : async with session . delete ( url , headers = headers ) as resp : if resp . 
status != 204 : err = await resp . json () logger . warning ( \"influxdb delete bucket error: %s when processin command %s \" , err [ \"message\" ], bucket_id , ) raise InfluxSchemaError ( f \"influxdb delete bucket failed, status code: { err [ 'message' ] } \" ) async def create_bucket ( self , description = None , retention_rules : List [ Dict ] = None , org_id : str = None ) -> str : \"\"\"\u521b\u5efainfluxdb\u4e2d\u6307\u5b9abucket Args: description: \u6307\u5b9abucket\u7684\u63cf\u8ff0 org_id: \u6307\u5b9abucket\u6240\u5c5e\u7684\u7ec4\u7ec7id\uff0c\u5982\u679c\u672a\u6307\u5b9a\uff0c\u5219\u4f7f\u7528\u672cclient\u5bf9\u5e94\u7684\u7ec4\u7ec7id\u3002 Raises: InfluxSchemaError: \u5f53influxdb\u8fd4\u56de\u9519\u8bef\u65f6\uff0c\u6bd4\u5982\u91cd\u590d\u521b\u5efabucket\u7b49\uff0c\u4f1a\u629b\u51fa\u6b64\u5f02\u5e38 Returns: \u65b0\u521b\u5efa\u7684bucket\u7684id \"\"\" if org_id is None : org_id = await self . query_org_id () url = f \" { self . _url } /api/v2/buckets\" headers = { \"Authorization\" : f \"Token { self . _token } \" } data = { \"name\" : self . _bucket , \"orgID\" : org_id , \"description\" : description , \"retentionRules\" : retention_rules , } async with ClientSession () as session : async with session . post ( url , data = json . dumps ( data ), headers = headers ) as resp : if resp . status != 201 : err = await resp . json () logger . warning ( \"influxdb create bucket error: %s when processin command %s \" , err [ \"message\" ], data , ) raise InfluxSchemaError ( f \"influxdb create bucket failed, status code: { err [ 'message' ] } \" ) else : result = await resp . json () return result [ \"id\" ] async def list_organizations ( self , offset : int = 0 , limit : int = 100 ) -> List [ Dict ]: \"\"\"\u5217\u51fa\u672c\u5ba2\u6237\u7aef\u5141\u8bb8\u67e5\u8be2\u7684\u6240\u7ec4\u7ec7 Args: offset : \u5206\u9875\u8d77\u70b9 limit : \u6bcf\u9875size Raises: InfluxSchemaError: influxdb\u8fd4\u56de\u7684\u9519\u8bef Returns: list of organizations, each organization is a dict with keys: ``` id : the id of the org links name : the name of the org description createdAt updatedAt ``` \"\"\" url = f \" { self . _url } /api/v2/orgs?offset= { offset } &limit= { limit } \" headers = { \"Authorization\" : f \"Token { self . _token } \" } async with ClientSession () as session : async with session . get ( url , headers = headers ) as resp : if resp . status != 200 : err = await resp . json () logger . warning ( \"influxdb query orgs err: %s \" , err [ \"message\" ]) raise InfluxSchemaError ( f \"influxdb query orgs failed, status code: { err [ 'message' ] } \" ) else : return ( await resp . json ())[ \"orgs\" ] async def query_org_id ( self , name : str = None ) -> str : \"\"\"\u901a\u8fc7\u7ec4\u7ec7\u540d\u67e5\u627e\u7ec4\u7ec7id \u53ea\u80fd\u67e5\u7684\u672c\u5ba2\u6237\u7aef\u5141\u8bb8\u67e5\u8be2\u7684\u7ec4\u7ec7\u3002\u5982\u679cname\u672a\u63d0\u4f9b\uff0c\u5219\u4f7f\u7528\u672c\u5ba2\u6237\u7aef\u521b\u5efa\u65f6\u4f20\u5165\u7684\u7ec4\u7ec7\u540d\u3002 Args: name: \u6307\u5b9a\u7ec4\u7ec7\u540d Returns: \u7ec4\u7ec7id \"\"\" if name is None : name = self . _org orgs = await self . 
list_organizations () for org in orgs : if org [ \"name\" ] == name : return org [ \"id\" ] raise BadParameterError ( f \"can't find org with name: { name } \" ) __init__ ( self , url , token , bucket , org = None , enable_compress = False , chunk_size = 5000 , precision = 's' ) special \u00b6 [summary] Parameters: Name Type Description Default url [type] [description] required token [type] [description] required bucket [type] [description] required org [type] [description]. Defaults to None. None enable_compress [type] [description]. Defaults to False. False chunk_size int number of lines to be saved in one request 5000 precision str \u652f\u6301\u7684\u65f6\u95f4\u7cbe\u5ea6 's' Source code in omicron/dal/influx/influxclient.py def __init__ ( self , url : str , token : str , bucket : str , org : str = None , enable_compress = False , chunk_size : int = 5000 , precision : str = \"s\" , ): \"\"\"[summary] Args: url ([type]): [description] token ([type]): [description] bucket ([type]): [description] org ([type], optional): [description]. Defaults to None. enable_compress ([type], optional): [description]. Defaults to False. chunk_size: number of lines to be saved in one request precision: \u652f\u6301\u7684\u65f6\u95f4\u7cbe\u5ea6 \"\"\" self . _url = url self . _bucket = bucket self . _enable_compress = enable_compress self . _org = org self . _org_id = None # \u9700\u8981\u65f6\u901a\u8fc7\u67e5\u8be2\u83b7\u53d6\uff0c\u6b64\u540e\u4e0d\u518d\u66f4\u65b0 self . _token = token # influxdb 2.0\u8d77\u652f\u6301\u7684\u65f6\u95f4\u7cbe\u5ea6\u6709\uff1ans, us, ms, s\u3002\u672c\u5ba2\u6237\u7aef\u53ea\u652f\u6301s, ms\u548cus self . _precision = precision . lower () if self . _precision not in [ \"s\" , \"ms\" , \"us\" ]: # pragma: no cover raise ValueError ( \"precision must be one of ['s', 'ms', 'us']\" ) self . _chunk_size = chunk_size # write self . _write_url = f \" { self . _url } /api/v2/write?org= { self . _org } &bucket= { self . _bucket } &precision= { self . _precision } \" self . _write_headers = { \"Content-Type\" : \"text/plain; charset=utf-8\" , \"Authorization\" : f \"Token { token } \" , \"Accept\" : \"application/json\" , } if self . _enable_compress : self . _write_headers [ \"Content-Encoding\" ] = \"gzip\" self . _query_url = f \" { self . _url } /api/v2/query?org= { self . _org } \" self . _query_headers = { \"Authorization\" : f \"Token { token } \" , \"Content-Type\" : \"application/vnd.flux\" , # influx\u67e5\u8be2\u7ed3\u679c\u683c\u5f0f\uff0c\u65e0\u8bba\u5982\u4f55\u6307\u5b9a\uff08\u6216\u8005\u4e0d\u6307\u5b9a\uff09\uff0c\u57282.1\u4e2d\u59cb\u7ec8\u662fcsv\u683c\u5f0f \"Accept\" : \"text/csv\" , } if self . _enable_compress : self . _query_headers [ \"Accept-Encoding\" ] = \"gzip\" self . _delete_url = ( f \" { self . _url } /api/v2/delete?org= { self . _org } &bucket= { self . _bucket } \" ) self . 
_delete_headers = { \"Authorization\" : f \"Token { token } \" , \"Content-Type\" : \"application/json\" , } create_bucket ( self , description = None , retention_rules = None , org_id = None ) async \u00b6 \u521b\u5efainfluxdb\u4e2d\u6307\u5b9abucket Parameters: Name Type Description Default description \u6307\u5b9abucket\u7684\u63cf\u8ff0 None org_id str \u6307\u5b9abucket\u6240\u5c5e\u7684\u7ec4\u7ec7id\uff0c\u5982\u679c\u672a\u6307\u5b9a\uff0c\u5219\u4f7f\u7528\u672cclient\u5bf9\u5e94\u7684\u7ec4\u7ec7id\u3002 None Exceptions: Type Description InfluxSchemaError \u5f53influxdb\u8fd4\u56de\u9519\u8bef\u65f6\uff0c\u6bd4\u5982\u91cd\u590d\u521b\u5efabucket\u7b49\uff0c\u4f1a\u629b\u51fa\u6b64\u5f02\u5e38 Returns: Type Description str \u65b0\u521b\u5efa\u7684bucket\u7684id Source code in omicron/dal/influx/influxclient.py async def create_bucket ( self , description = None , retention_rules : List [ Dict ] = None , org_id : str = None ) -> str : \"\"\"\u521b\u5efainfluxdb\u4e2d\u6307\u5b9abucket Args: description: \u6307\u5b9abucket\u7684\u63cf\u8ff0 org_id: \u6307\u5b9abucket\u6240\u5c5e\u7684\u7ec4\u7ec7id\uff0c\u5982\u679c\u672a\u6307\u5b9a\uff0c\u5219\u4f7f\u7528\u672cclient\u5bf9\u5e94\u7684\u7ec4\u7ec7id\u3002 Raises: InfluxSchemaError: \u5f53influxdb\u8fd4\u56de\u9519\u8bef\u65f6\uff0c\u6bd4\u5982\u91cd\u590d\u521b\u5efabucket\u7b49\uff0c\u4f1a\u629b\u51fa\u6b64\u5f02\u5e38 Returns: \u65b0\u521b\u5efa\u7684bucket\u7684id \"\"\" if org_id is None : org_id = await self . query_org_id () url = f \" { self . _url } /api/v2/buckets\" headers = { \"Authorization\" : f \"Token { self . _token } \" } data = { \"name\" : self . _bucket , \"orgID\" : org_id , \"description\" : description , \"retentionRules\" : retention_rules , } async with ClientSession () as session : async with session . post ( url , data = json . dumps ( data ), headers = headers ) as resp : if resp . status != 201 : err = await resp . json () logger . warning ( \"influxdb create bucket error: %s when processin command %s \" , err [ \"message\" ], data , ) raise InfluxSchemaError ( f \"influxdb create bucket failed, status code: { err [ 'message' ] } \" ) else : result = await resp . json () return result [ \"id\" ] delete ( self , measurement , stop , tags = {}, start = None , precision = 's' ) async \u00b6 \u5220\u9664influxdb\u4e2d\u6307\u5b9a\u65f6\u95f4\u6bb5\u5185\u7684\u6570\u636e \u5173\u4e8e\u53c2\u6570\uff0c\u8bf7\u53c2\u89c1 Flux.delete \u3002 Parameters: Name Type Description Default measurement str \u6307\u5b9ameasurement\u540d\u5b57 required stop datetime \u5f85\u5220\u9664\u8bb0\u5f55\u7684\u7ed3\u675f\u65f6\u95f4 required start datetime \u5f85\u5220\u9664\u8bb0\u5f55\u7684\u5f00\u59cb\u65f6\u95f4\uff0c\u5982\u679c\u672a\u6307\u5b9a\uff0c\u5219\u4f7f\u7528EPOCH_START None tags Optional[Dict[str, str]] \u6309tag\u8fdb\u884c\u8fc7\u6ee4\u7684\u6761\u4ef6 {} precision str \u7528\u4ee5\u683c\u5f0f\u5316\u8d77\u59cb\u548c\u7ed3\u675f\u65f6\u95f4\u3002 's' Exceptions: Type Description InfluxDeleteError \u5982\u679c\u5220\u9664\u5931\u8d25\uff0c\u5219\u629b\u51fa\u6b64\u5f02\u5e38 Source code in omicron/dal/influx/influxclient.py async def delete ( self , measurement : str , stop : datetime . datetime , tags : Optional [ Dict [ str , str ]] = {}, start : datetime . 
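A minimal sketch of constructing the client and creating its bucket; the URL, token, bucket and org below are placeholders for a local InfluxDB 2.x instance.

```python
import asyncio

from omicron.dal.influx.influxclient import InfluxClient

async def main():
    client = InfluxClient(
        "http://localhost:8086",   # placeholder URL
        token="my-token",          # placeholder token
        bucket="zillionare",       # bucket this client is bound to
        org="my-org",              # placeholder org name
        enable_compress=True,
        precision="s",
    )
    # Create the bucket the client was configured with; the id of the new
    # bucket is returned, InfluxSchemaError is raised if creation fails
    # (for example when the bucket already exists).
    bucket_id = await client.create_bucket(description="demo bucket")
    print(bucket_id)

asyncio.run(main())
```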
datetime = None , precision : str = \"s\" , ): \"\"\"\u5220\u9664influxdb\u4e2d\u6307\u5b9a\u65f6\u95f4\u6bb5\u5185\u7684\u6570\u636e \u5173\u4e8e\u53c2\u6570\uff0c\u8bf7\u53c2\u89c1[Flux.delete][omicron.dal.influx.flux.Flux.delete]\u3002 Args: measurement: \u6307\u5b9ameasurement\u540d\u5b57 stop: \u5f85\u5220\u9664\u8bb0\u5f55\u7684\u7ed3\u675f\u65f6\u95f4 start: \u5f85\u5220\u9664\u8bb0\u5f55\u7684\u5f00\u59cb\u65f6\u95f4\uff0c\u5982\u679c\u672a\u6307\u5b9a\uff0c\u5219\u4f7f\u7528EPOCH_START tags: \u6309tag\u8fdb\u884c\u8fc7\u6ee4\u7684\u6761\u4ef6 precision: \u7528\u4ee5\u683c\u5f0f\u5316\u8d77\u59cb\u548c\u7ed3\u675f\u65f6\u95f4\u3002 Raises: InfluxDeleteError: \u5982\u679c\u5220\u9664\u5931\u8d25\uff0c\u5219\u629b\u51fa\u6b64\u5f02\u5e38 \"\"\" # todo: add raise error declaration command = Flux () . delete ( measurement , stop , tags , start = start , precision = precision ) async with ClientSession () as session : async with session . post ( self . _delete_url , data = json . dumps ( command ), headers = self . _delete_headers ) as resp : if resp . status != 204 : err = await resp . json () logger . warning ( \"influxdb delete error: %s when processin command %s \" , err [ \"message\" ], command , ) raise InfluxDeleteError ( f \"influxdb delete failed, status code: { err [ 'message' ] } \" ) delete_bucket ( self , bucket_id = None ) async \u00b6 \u5220\u9664influxdb\u4e2d\u6307\u5b9abucket Parameters: Name Type Description Default bucket_id str \u6307\u5b9abucket\u7684id\u3002\u5982\u679c\u4e3aNone\uff0c\u5219\u4f1a\u5220\u9664\u672cclient\u5bf9\u5e94\u7684bucket\u3002 None Source code in omicron/dal/influx/influxclient.py async def delete_bucket ( self , bucket_id : str = None ): \"\"\"\u5220\u9664influxdb\u4e2d\u6307\u5b9abucket Args: bucket_id: \u6307\u5b9abucket\u7684id\u3002\u5982\u679c\u4e3aNone\uff0c\u5219\u4f1a\u5220\u9664\u672cclient\u5bf9\u5e94\u7684bucket\u3002 \"\"\" if bucket_id is None : buckets = await self . list_buckets () for bucket in buckets : if bucket [ \"type\" ] == \"user\" and bucket [ \"name\" ] == self . _bucket : bucket_id = bucket [ \"id\" ] break else : raise BadParameterError ( \"bucket_id is None, and we can't find bucket with name: %s \" % self . _bucket ) url = f \" { self . _url } /api/v2/buckets/ { bucket_id } \" headers = { \"Authorization\" : f \"Token { self . _token } \" } async with ClientSession () as session : async with session . delete ( url , headers = headers ) as resp : if resp . status != 204 : err = await resp . json () logger . warning ( \"influxdb delete bucket error: %s when processin command %s \" , err [ \"message\" ], bucket_id , ) raise InfluxSchemaError ( f \"influxdb delete bucket failed, status code: { err [ 'message' ] } \" ) drop_measurement ( self , measurement ) async \u00b6 \u4eceinfluxdb\u4e2d\u5220\u9664\u4e00\u4e2ameasurement \u8c03\u7528\u6b64\u65b9\u6cd5\u540e\uff0c\u5b9e\u9645\u4e0a\u8be5measurement\u4ecd\u7136\u5b58\u5728\uff0c\u53ea\u662f\u6ca1\u6709\u6570\u636e\u3002 Source code in omicron/dal/influx/influxclient.py async def drop_measurement ( self , measurement : str ): \"\"\"\u4eceinfluxdb\u4e2d\u5220\u9664\u4e00\u4e2ameasurement \u8c03\u7528\u6b64\u65b9\u6cd5\u540e\uff0c\u5b9e\u9645\u4e0a\u8be5measurement\u4ecd\u7136\u5b58\u5728\uff0c\u53ea\u662f\u6ca1\u6709\u6570\u636e\u3002 \"\"\" # todo: add raise error declaration await self . delete ( measurement , arrow . now () . 
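A sketch of removing data, reusing a `client` constructed as in the sketch above; the measurement and tag values are placeholders.

```python
import datetime

from omicron.dal.influx.influxclient import InfluxClient

async def purge(client: InfluxClient):
    # Remove one security's records up to 2022-01-01; `start` is omitted
    # and therefore defaults to EPOCH_START.
    await client.delete(
        "stock_bars_1d",
        datetime.datetime(2022, 1, 1),
        tags={"code": "000001.XSHE"},
    )
    # Remove every record of the measurement; the measurement itself still
    # exists afterwards, it just holds no data.
    await client.drop_measurement("stock_bars_1d")
```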
naive ) list_buckets ( self ) async \u00b6 \u5217\u51fainfluxdb\u4e2d\u5bf9\u5e94token\u80fd\u770b\u5230\u7684\u6240\u6709\u7684bucket Returns: Type Description list of buckets, each bucket is a dict with keys ``` id orgID, a 16 bytes hex string type, system or user description name retentionRules createdAt updatedAt links labels ``` Source code in omicron/dal/influx/influxclient.py async def list_buckets ( self ) -> List [ Dict ]: \"\"\"\u5217\u51fainfluxdb\u4e2d\u5bf9\u5e94token\u80fd\u770b\u5230\u7684\u6240\u6709\u7684bucket Returns: list of buckets, each bucket is a dict with keys: ``` id orgID, a 16 bytes hex string type, system or user description name retentionRules createdAt updatedAt links labels ``` \"\"\" url = f \" { self . _url } /api/v2/buckets\" headers = { \"Authorization\" : f \"Token { self . _token } \" } async with ClientSession () as session : async with session . get ( url , headers = headers ) as resp : if resp . status != 200 : err = await resp . json () raise InfluxSchemaError ( f \"influxdb list bucket failed, status code: { err [ 'message' ] } \" ) else : return ( await resp . json ())[ \"buckets\" ] list_organizations ( self , offset = 0 , limit = 100 ) async \u00b6 \u5217\u51fa\u672c\u5ba2\u6237\u7aef\u5141\u8bb8\u67e5\u8be2\u7684\u6240\u7ec4\u7ec7 Parameters: Name Type Description Default offset \u5206\u9875\u8d77\u70b9 0 limit \u6bcf\u9875size 100 Exceptions: Type Description InfluxSchemaError influxdb\u8fd4\u56de\u7684\u9519\u8bef Returns: Type Description list of organizations, each organization is a dict with keys 1 2 3 4 5 6 id : the id of the org links name : the name of the org description createdAt updatedAt Source code in omicron/dal/influx/influxclient.py async def list_organizations ( self , offset : int = 0 , limit : int = 100 ) -> List [ Dict ]: \"\"\"\u5217\u51fa\u672c\u5ba2\u6237\u7aef\u5141\u8bb8\u67e5\u8be2\u7684\u6240\u7ec4\u7ec7 Args: offset : \u5206\u9875\u8d77\u70b9 limit : \u6bcf\u9875size Raises: InfluxSchemaError: influxdb\u8fd4\u56de\u7684\u9519\u8bef Returns: list of organizations, each organization is a dict with keys: ``` id : the id of the org links name : the name of the org description createdAt updatedAt ``` \"\"\" url = f \" { self . _url } /api/v2/orgs?offset= { offset } &limit= { limit } \" headers = { \"Authorization\" : f \"Token { self . _token } \" } async with ClientSession () as session : async with session . get ( url , headers = headers ) as resp : if resp . status != 200 : err = await resp . json () logger . warning ( \"influxdb query orgs err: %s \" , err [ \"message\" ]) raise InfluxSchemaError ( f \"influxdb query orgs failed, status code: { err [ 'message' ] } \" ) else : return ( await resp . 
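A sketch of the schema helpers; it assumes a `client` built as above and only reads keys documented here.

```python
from omicron.dal.influx.influxclient import InfluxClient

async def show_schema(client: InfluxClient):
    # Buckets and orgs visible to this token; a non-2xx response raises
    # InfluxSchemaError.
    for bucket in await client.list_buckets():
        print(bucket["name"], bucket["type"], bucket["id"])
    for org in await client.list_organizations(offset=0, limit=100):
        print(org["name"], org["id"])
    # Resolve the id of the org the client was created with:
    print(await client.query_org_id())
```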
json ())[ \"orgs\" ] query ( self , flux , deserializer = None ) async \u00b6 flux\u67e5\u8be2 flux\u67e5\u8be2\u7ed3\u679c\u662f\u4e00\u4e2a\u4ee5annotated csv\u683c\u5f0f\u5b58\u50a8\u7684\u6570\u636e\uff0c\u4f8b\u5982\uff1a 1 2 ,result,table,_time,code,amount,close,factor,high,low,open,volume ,_result,0,2019-01-01T00:00:00Z,000001.XSHE,100000000,5.15,1.23,5.2,5,5.1,1000000 \u4e0a\u8ff0 result \u4e2d\uff0c\u4e8b\u5148\u901a\u8fc7Flux.keep()\u9650\u5236\u4e86\u8fd4\u56de\u7684\u5b57\u6bb5\u4e3a_time,code,amount,close,factor,high,low,open,volume\u3002influxdb\u67e5\u8be2\u8fd4\u56de\u7ed3\u679c\u65f6\uff0c\u603b\u662f\u6309\u7167\u5b57\u6bb5\u540d\u79f0\u5347\u5e8f\u6392\u5217\u3002\u6b64\u5916\uff0c\u603b\u662f\u4f1a\u989d\u5916\u5730\u8fd4\u56de_result, table\u4e24\u4e2a\u5b57\u6bb5\u3002 \u5982\u679c\u4f20\u5165\u4e86deserializer\uff0c\u5219\u4f1a\u8c03\u7528deserializer\u5c06\u5176\u89e3\u6790\u6210\u4e3apython\u5bf9\u8c61\u3002\u5426\u5219\uff0c\u8fd4\u56debytes\u6570\u636e\u3002 Parameters: Name Type Description Default flux Union[omicron.dal.influx.flux.Flux, str] flux\u67e5\u8be2\u8bed\u53e5 required deserializer Callable \u53cd\u5e8f\u5217\u5316\u51fd\u6570 None Returns: Type Description Any \u5982\u679c\u672a\u63d0\u4f9b\u53cd\u5e8f\u5217\u5316\u51fd\u6570\uff0c\u5219\u8fd4\u56de\u7ed3\u679c\u4e3abytes array(\u5982\u679c\u6307\u5b9a\u4e86compress=True\uff0c\u8fd4\u56de\u7ed3\u679c\u4e3agzip\u89e3\u538b\u7f29\u540e\u7684bytes array)\uff0c\u5426\u5219\u8fd4\u56de\u53cd\u5e8f\u5217\u5316\u540e\u7684python\u5bf9\u8c61 Source code in omicron/dal/influx/influxclient.py async def query ( self , flux : Union [ Flux , str ], deserializer : Callable = None ) -> Any : \"\"\"flux\u67e5\u8be2 flux\u67e5\u8be2\u7ed3\u679c\u662f\u4e00\u4e2a\u4ee5annotated csv\u683c\u5f0f\u5b58\u50a8\u7684\u6570\u636e\uff0c\u4f8b\u5982\uff1a ``` ,result,table,_time,code,amount,close,factor,high,low,open,volume ,_result,0,2019-01-01T00:00:00Z,000001.XSHE,100000000,5.15,1.23,5.2,5,5.1,1000000 ``` \u4e0a\u8ff0`result`\u4e2d\uff0c\u4e8b\u5148\u901a\u8fc7Flux.keep()\u9650\u5236\u4e86\u8fd4\u56de\u7684\u5b57\u6bb5\u4e3a_time,code,amount,close,factor,high,low,open,volume\u3002influxdb\u67e5\u8be2\u8fd4\u56de\u7ed3\u679c\u65f6\uff0c\u603b\u662f\u6309\u7167\u5b57\u6bb5\u540d\u79f0\u5347\u5e8f\u6392\u5217\u3002\u6b64\u5916\uff0c\u603b\u662f\u4f1a\u989d\u5916\u5730\u8fd4\u56de_result, table\u4e24\u4e2a\u5b57\u6bb5\u3002 \u5982\u679c\u4f20\u5165\u4e86deserializer\uff0c\u5219\u4f1a\u8c03\u7528deserializer\u5c06\u5176\u89e3\u6790\u6210\u4e3apython\u5bf9\u8c61\u3002\u5426\u5219\uff0c\u8fd4\u56debytes\u6570\u636e\u3002 Args: flux: flux\u67e5\u8be2\u8bed\u53e5 deserializer: \u53cd\u5e8f\u5217\u5316\u51fd\u6570 Returns: \u5982\u679c\u672a\u63d0\u4f9b\u53cd\u5e8f\u5217\u5316\u51fd\u6570\uff0c\u5219\u8fd4\u56de\u7ed3\u679c\u4e3abytes array(\u5982\u679c\u6307\u5b9a\u4e86compress=True\uff0c\u8fd4\u56de\u7ed3\u679c\u4e3agzip\u89e3\u538b\u7f29\u540e\u7684bytes array)\uff0c\u5426\u5219\u8fd4\u56de\u53cd\u5e8f\u5217\u5316\u540e\u7684python\u5bf9\u8c61 \"\"\" if isinstance ( flux , Flux ): flux = str ( flux ) async with ClientSession () as session : async with session . post ( self . _query_url , data = flux , headers = self . _query_headers ) as resp : if resp . status != 200 : err = await resp . json () logger . warning ( f \"influxdb query error: { err } when processing { flux [: 500 ] } \" ) logger . 
debug ( \"data caused error: %s \" , flux ) raise InfluxDBQueryError ( f \"influxdb query failed, status code: { err [ 'message' ] } \" ) else : # auto-unzip body = await resp . read () if deserializer : try : return deserializer ( body ) except Exception as e : logger . exception ( e ) logger . warning ( \"failed to deserialize data: %s , the query is: %s \" , body , flux [: 500 ], ) raise else : return body query_org_id ( self , name = None ) async \u00b6 \u901a\u8fc7\u7ec4\u7ec7\u540d\u67e5\u627e\u7ec4\u7ec7id \u53ea\u80fd\u67e5\u7684\u672c\u5ba2\u6237\u7aef\u5141\u8bb8\u67e5\u8be2\u7684\u7ec4\u7ec7\u3002\u5982\u679cname\u672a\u63d0\u4f9b\uff0c\u5219\u4f7f\u7528\u672c\u5ba2\u6237\u7aef\u521b\u5efa\u65f6\u4f20\u5165\u7684\u7ec4\u7ec7\u540d\u3002 Parameters: Name Type Description Default name str \u6307\u5b9a\u7ec4\u7ec7\u540d None Returns: Type Description str \u7ec4\u7ec7id Source code in omicron/dal/influx/influxclient.py async def query_org_id ( self , name : str = None ) -> str : \"\"\"\u901a\u8fc7\u7ec4\u7ec7\u540d\u67e5\u627e\u7ec4\u7ec7id \u53ea\u80fd\u67e5\u7684\u672c\u5ba2\u6237\u7aef\u5141\u8bb8\u67e5\u8be2\u7684\u7ec4\u7ec7\u3002\u5982\u679cname\u672a\u63d0\u4f9b\uff0c\u5219\u4f7f\u7528\u672c\u5ba2\u6237\u7aef\u521b\u5efa\u65f6\u4f20\u5165\u7684\u7ec4\u7ec7\u540d\u3002 Args: name: \u6307\u5b9a\u7ec4\u7ec7\u540d Returns: \u7ec4\u7ec7id \"\"\" if name is None : name = self . _org orgs = await self . list_organizations () for org in orgs : if org [ \"name\" ] == name : return org [ \"id\" ] raise BadParameterError ( f \"can't find org with name: { name } \" ) save ( self , data , measurement = None , tag_keys = [], time_key = None , global_tags = {}, chunk_size = None ) async \u00b6 save data into influxdb if data is a pandas.DataFrame or numy structured array, it will be converted to line protocol and saved. If data is str, use write method instead. Parameters: Name Type Description Default data Union[numpy.ndarray, pandas.core.frame.DataFrame] data to be saved required measurement str the name of measurement None tag_keys List[str] which columns name will be used as tags [] chunk_size int number of lines to be saved in one request. if it's -1, then all data will be written in one request. If it's None, then it will be set to self._chunk_size None Exceptions: Type Description InfluxDBWriteError if write failed Source code in omicron/dal/influx/influxclient.py async def save ( self , data : Union [ np . ndarray , DataFrame ], measurement : str = None , tag_keys : List [ str ] = [], time_key : str = None , global_tags : Dict = {}, chunk_size : int = None , ) -> None : \"\"\"save `data` into influxdb if `data` is a pandas.DataFrame or numy structured array, it will be converted to line protocol and saved. If `data` is str, use `write` method instead. Args: data: data to be saved measurement: the name of measurement tag_keys: which columns name will be used as tags chunk_size: number of lines to be saved in one request. if it's -1, then all data will be written in one request. If it's None, then it will be set to `self._chunk_size` Raises: InfluxDBWriteError: if write failed \"\"\" # todo: add more errors raise if isinstance ( data , DataFrame ): assert ( measurement is not None ), \"measurement must be specified when data is a DataFrame\" if tag_keys : assert set ( tag_keys ) in set ( data . columns . tolist () ), \"tag_keys must be in data.columns\" serializer = DataframeSerializer ( data , measurement , time_key , tag_keys , global_tags , precision = self . 
_precision , ) if chunk_size == - 1 : chunk_size = len ( data ) for lines in serializer . serialize ( chunk_size or self . _chunk_size ): await self . write ( lines ) elif isinstance ( data , np . ndarray ): assert ( measurement is not None ), \"measurement must be specified when data is a numpy array\" assert ( time_key is not None ), \"time_key must be specified when data is a numpy array\" serializer = NumpySerializer ( data , measurement , time_key , tag_keys , global_tags , time_precision = self . _precision , ) if chunk_size == - 1 : chunk_size = len ( data ) for lines in serializer . serialize ( chunk_size or self . _chunk_size ): await self . write ( lines ) else : raise TypeError ( f \"data must be pandas.DataFrame, numpy array, got { type ( data ) } \" ) write ( self , line_protocol ) async \u00b6 \u5c06line-protocol\u6570\u7ec4\u5199\u5165influxdb Parameters: Name Type Description Default line_protocol str \u5f85\u5199\u5165\u7684\u6570\u636e\uff0c\u4ee5line-protocol\u6570\u7ec4\u5f62\u5f0f\u5b58\u5728 required Source code in omicron/dal/influx/influxclient.py async def write ( self , line_protocol : str ): \"\"\"\u5c06line-protocol\u6570\u7ec4\u5199\u5165influxdb Args: line_protocol: \u5f85\u5199\u5165\u7684\u6570\u636e\uff0c\u4ee5line-protocol\u6570\u7ec4\u5f62\u5f0f\u5b58\u5728 \"\"\" # todo: add raise error declaration if self . _enable_compress : line_protocol_ = gzip . compress ( line_protocol . encode ( \"utf-8\" )) else : line_protocol_ = line_protocol async with ClientSession () as session : async with session . post ( self . _write_url , data = line_protocol_ , headers = self . _write_headers ) as resp : if resp . status != 204 : err = await resp . json () logger . warning ( \"influxdb write error when processing: %s , err code: %s , message: %s \" , { line_protocol [: 100 ]}, err [ \"code\" ], err [ \"message\" ], ) logger . debug ( \"data caused error: %s \" , line_protocol ) raise InfluxDBWriteError ( f \"influxdb write failed, err: { err [ 'message' ] } \" )","title":"InfluxClient"},{"location":"api/dal/influxclient/#influxclient---the-performanct-async-client-for-influxdb","text":"Source code in omicron/dal/influx/influxclient.py class InfluxClient : def __init__ ( self , url : str , token : str , bucket : str , org : str = None , enable_compress = False , chunk_size : int = 5000 , precision : str = \"s\" , ): \"\"\"[summary] Args: url ([type]): [description] token ([type]): [description] bucket ([type]): [description] org ([type], optional): [description]. Defaults to None. enable_compress ([type], optional): [description]. Defaults to False. chunk_size: number of lines to be saved in one request precision: \u652f\u6301\u7684\u65f6\u95f4\u7cbe\u5ea6 \"\"\" self . _url = url self . _bucket = bucket self . _enable_compress = enable_compress self . _org = org self . _org_id = None # \u9700\u8981\u65f6\u901a\u8fc7\u67e5\u8be2\u83b7\u53d6\uff0c\u6b64\u540e\u4e0d\u518d\u66f4\u65b0 self . _token = token # influxdb 2.0\u8d77\u652f\u6301\u7684\u65f6\u95f4\u7cbe\u5ea6\u6709\uff1ans, us, ms, s\u3002\u672c\u5ba2\u6237\u7aef\u53ea\u652f\u6301s, ms\u548cus self . _precision = precision . lower () if self . _precision not in [ \"s\" , \"ms\" , \"us\" ]: # pragma: no cover raise ValueError ( \"precision must be one of ['s', 'ms', 'us']\" ) self . _chunk_size = chunk_size # write self . _write_url = f \" { self . _url } /api/v2/write?org= { self . _org } &bucket= { self . _bucket } &precision= { self . _precision } \" self . 
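A sketch of `save()` with a numpy structured array; the exact dtypes NumpySerializer expects are not shown in this section, so treat the dtype choice as an assumption, and the measurement, codes and prices as placeholders.

```python
import numpy as np

from omicron.dal.influx.influxclient import InfluxClient

async def save_bars(client: InfluxClient):
    # Two placeholder daily bars; "frame" is used as the time column and
    # "code" is written as a tag, the remaining columns become fields.
    bars = np.array(
        [
            ("2022-01-04", "000001.XSHE", 17.2, 17.3),
            ("2022-01-05", "000001.XSHE", 17.4, 17.5),
        ],
        dtype=[
            ("frame", "datetime64[s]"),
            ("code", "U11"),
            ("open", "f4"),
            ("close", "f4"),
        ],
    )
    # chunk_size=-1 writes all rows in a single request.
    await client.save(
        bars,
        measurement="stock_bars_1d",
        tag_keys=["code"],
        time_key="frame",
        chunk_size=-1,
    )
```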
_write_headers = { \"Content-Type\" : \"text/plain; charset=utf-8\" , \"Authorization\" : f \"Token { token } \" , \"Accept\" : \"application/json\" , } if self . _enable_compress : self . _write_headers [ \"Content-Encoding\" ] = \"gzip\" self . _query_url = f \" { self . _url } /api/v2/query?org= { self . _org } \" self . _query_headers = { \"Authorization\" : f \"Token { token } \" , \"Content-Type\" : \"application/vnd.flux\" , # influx\u67e5\u8be2\u7ed3\u679c\u683c\u5f0f\uff0c\u65e0\u8bba\u5982\u4f55\u6307\u5b9a\uff08\u6216\u8005\u4e0d\u6307\u5b9a\uff09\uff0c\u57282.1\u4e2d\u59cb\u7ec8\u662fcsv\u683c\u5f0f \"Accept\" : \"text/csv\" , } if self . _enable_compress : self . _query_headers [ \"Accept-Encoding\" ] = \"gzip\" self . _delete_url = ( f \" { self . _url } /api/v2/delete?org= { self . _org } &bucket= { self . _bucket } \" ) self . _delete_headers = { \"Authorization\" : f \"Token { token } \" , \"Content-Type\" : \"application/json\" , } async def save ( self , data : Union [ np . ndarray , DataFrame ], measurement : str = None , tag_keys : List [ str ] = [], time_key : str = None , global_tags : Dict = {}, chunk_size : int = None , ) -> None : \"\"\"save `data` into influxdb if `data` is a pandas.DataFrame or numy structured array, it will be converted to line protocol and saved. If `data` is str, use `write` method instead. Args: data: data to be saved measurement: the name of measurement tag_keys: which columns name will be used as tags chunk_size: number of lines to be saved in one request. if it's -1, then all data will be written in one request. If it's None, then it will be set to `self._chunk_size` Raises: InfluxDBWriteError: if write failed \"\"\" # todo: add more errors raise if isinstance ( data , DataFrame ): assert ( measurement is not None ), \"measurement must be specified when data is a DataFrame\" if tag_keys : assert set ( tag_keys ) in set ( data . columns . tolist () ), \"tag_keys must be in data.columns\" serializer = DataframeSerializer ( data , measurement , time_key , tag_keys , global_tags , precision = self . _precision , ) if chunk_size == - 1 : chunk_size = len ( data ) for lines in serializer . serialize ( chunk_size or self . _chunk_size ): await self . write ( lines ) elif isinstance ( data , np . ndarray ): assert ( measurement is not None ), \"measurement must be specified when data is a numpy array\" assert ( time_key is not None ), \"time_key must be specified when data is a numpy array\" serializer = NumpySerializer ( data , measurement , time_key , tag_keys , global_tags , time_precision = self . _precision , ) if chunk_size == - 1 : chunk_size = len ( data ) for lines in serializer . serialize ( chunk_size or self . _chunk_size ): await self . write ( lines ) else : raise TypeError ( f \"data must be pandas.DataFrame, numpy array, got { type ( data ) } \" ) async def write ( self , line_protocol : str ): \"\"\"\u5c06line-protocol\u6570\u7ec4\u5199\u5165influxdb Args: line_protocol: \u5f85\u5199\u5165\u7684\u6570\u636e\uff0c\u4ee5line-protocol\u6570\u7ec4\u5f62\u5f0f\u5b58\u5728 \"\"\" # todo: add raise error declaration if self . _enable_compress : line_protocol_ = gzip . compress ( line_protocol . encode ( \"utf-8\" )) else : line_protocol_ = line_protocol async with ClientSession () as session : async with session . post ( self . _write_url , data = line_protocol_ , headers = self . _write_headers ) as resp : if resp . status != 204 : err = await resp . json () logger . 
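The `save()` source above converts a DataFrame or numpy structured array into line protocol and writes it in `chunk_size`-sized batches via `write()`. The following is a minimal usage sketch, not taken from the project's own docs: the connection settings, the measurement name `stock_bars_1m`, and the assumption that the `frame` column carries the timestamps are placeholders of mine.

```python
import asyncio
import pandas as pd
from omicron.dal.influx.influxclient import InfluxClient

async def demo_save():
    # Placeholder connection settings; point these at your own InfluxDB 2.x instance.
    client = InfluxClient("http://localhost:8086", "my-token", "my-bucket", org="my-org")

    # A tiny frame: "frame" is assumed to be the timestamp column, "code" becomes
    # a tag, and the remaining columns are written as fields.
    df = pd.DataFrame({
        "frame": pd.to_datetime(["2019-01-01 09:31:00", "2019-01-01 09:32:00"]),
        "code": ["000001.XSHE", "000001.XSHE"],
        "close": [5.15, 5.20],
    })

    # measurement is mandatory for DataFrame input; chunk_size=-1 sends everything
    # in a single request instead of batches of self._chunk_size lines.
    await client.save(
        df,
        measurement="stock_bars_1m",
        tag_keys=["code"],
        time_key="frame",
        chunk_size=-1,
    )

asyncio.run(demo_save())
```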
warning ( \"influxdb write error when processing: %s , err code: %s , message: %s \" , { line_protocol [: 100 ]}, err [ \"code\" ], err [ \"message\" ], ) logger . debug ( \"data caused error: %s \" , line_protocol ) raise InfluxDBWriteError ( f \"influxdb write failed, err: { err [ 'message' ] } \" ) async def query ( self , flux : Union [ Flux , str ], deserializer : Callable = None ) -> Any : \"\"\"flux\u67e5\u8be2 flux\u67e5\u8be2\u7ed3\u679c\u662f\u4e00\u4e2a\u4ee5annotated csv\u683c\u5f0f\u5b58\u50a8\u7684\u6570\u636e\uff0c\u4f8b\u5982\uff1a ``` ,result,table,_time,code,amount,close,factor,high,low,open,volume ,_result,0,2019-01-01T00:00:00Z,000001.XSHE,100000000,5.15,1.23,5.2,5,5.1,1000000 ``` \u4e0a\u8ff0`result`\u4e2d\uff0c\u4e8b\u5148\u901a\u8fc7Flux.keep()\u9650\u5236\u4e86\u8fd4\u56de\u7684\u5b57\u6bb5\u4e3a_time,code,amount,close,factor,high,low,open,volume\u3002influxdb\u67e5\u8be2\u8fd4\u56de\u7ed3\u679c\u65f6\uff0c\u603b\u662f\u6309\u7167\u5b57\u6bb5\u540d\u79f0\u5347\u5e8f\u6392\u5217\u3002\u6b64\u5916\uff0c\u603b\u662f\u4f1a\u989d\u5916\u5730\u8fd4\u56de_result, table\u4e24\u4e2a\u5b57\u6bb5\u3002 \u5982\u679c\u4f20\u5165\u4e86deserializer\uff0c\u5219\u4f1a\u8c03\u7528deserializer\u5c06\u5176\u89e3\u6790\u6210\u4e3apython\u5bf9\u8c61\u3002\u5426\u5219\uff0c\u8fd4\u56debytes\u6570\u636e\u3002 Args: flux: flux\u67e5\u8be2\u8bed\u53e5 deserializer: \u53cd\u5e8f\u5217\u5316\u51fd\u6570 Returns: \u5982\u679c\u672a\u63d0\u4f9b\u53cd\u5e8f\u5217\u5316\u51fd\u6570\uff0c\u5219\u8fd4\u56de\u7ed3\u679c\u4e3abytes array(\u5982\u679c\u6307\u5b9a\u4e86compress=True\uff0c\u8fd4\u56de\u7ed3\u679c\u4e3agzip\u89e3\u538b\u7f29\u540e\u7684bytes array)\uff0c\u5426\u5219\u8fd4\u56de\u53cd\u5e8f\u5217\u5316\u540e\u7684python\u5bf9\u8c61 \"\"\" if isinstance ( flux , Flux ): flux = str ( flux ) async with ClientSession () as session : async with session . post ( self . _query_url , data = flux , headers = self . _query_headers ) as resp : if resp . status != 200 : err = await resp . json () logger . warning ( f \"influxdb query error: { err } when processing { flux [: 500 ] } \" ) logger . debug ( \"data caused error: %s \" , flux ) raise InfluxDBQueryError ( f \"influxdb query failed, status code: { err [ 'message' ] } \" ) else : # auto-unzip body = await resp . read () if deserializer : try : return deserializer ( body ) except Exception as e : logger . exception ( e ) logger . warning ( \"failed to deserialize data: %s , the query is: %s \" , body , flux [: 500 ], ) raise else : return body async def drop_measurement ( self , measurement : str ): \"\"\"\u4eceinfluxdb\u4e2d\u5220\u9664\u4e00\u4e2ameasurement \u8c03\u7528\u6b64\u65b9\u6cd5\u540e\uff0c\u5b9e\u9645\u4e0a\u8be5measurement\u4ecd\u7136\u5b58\u5728\uff0c\u53ea\u662f\u6ca1\u6709\u6570\u636e\u3002 \"\"\" # todo: add raise error declaration await self . delete ( measurement , arrow . now () . naive ) async def delete ( self , measurement : str , stop : datetime . datetime , tags : Optional [ Dict [ str , str ]] = {}, start : datetime . 
datetime = None , precision : str = \"s\" , ): \"\"\"\u5220\u9664influxdb\u4e2d\u6307\u5b9a\u65f6\u95f4\u6bb5\u5185\u7684\u6570\u636e \u5173\u4e8e\u53c2\u6570\uff0c\u8bf7\u53c2\u89c1[Flux.delete][omicron.dal.influx.flux.Flux.delete]\u3002 Args: measurement: \u6307\u5b9ameasurement\u540d\u5b57 stop: \u5f85\u5220\u9664\u8bb0\u5f55\u7684\u7ed3\u675f\u65f6\u95f4 start: \u5f85\u5220\u9664\u8bb0\u5f55\u7684\u5f00\u59cb\u65f6\u95f4\uff0c\u5982\u679c\u672a\u6307\u5b9a\uff0c\u5219\u4f7f\u7528EPOCH_START tags: \u6309tag\u8fdb\u884c\u8fc7\u6ee4\u7684\u6761\u4ef6 precision: \u7528\u4ee5\u683c\u5f0f\u5316\u8d77\u59cb\u548c\u7ed3\u675f\u65f6\u95f4\u3002 Raises: InfluxDeleteError: \u5982\u679c\u5220\u9664\u5931\u8d25\uff0c\u5219\u629b\u51fa\u6b64\u5f02\u5e38 \"\"\" # todo: add raise error declaration command = Flux () . delete ( measurement , stop , tags , start = start , precision = precision ) async with ClientSession () as session : async with session . post ( self . _delete_url , data = json . dumps ( command ), headers = self . _delete_headers ) as resp : if resp . status != 204 : err = await resp . json () logger . warning ( \"influxdb delete error: %s when processin command %s \" , err [ \"message\" ], command , ) raise InfluxDeleteError ( f \"influxdb delete failed, status code: { err [ 'message' ] } \" ) async def list_buckets ( self ) -> List [ Dict ]: \"\"\"\u5217\u51fainfluxdb\u4e2d\u5bf9\u5e94token\u80fd\u770b\u5230\u7684\u6240\u6709\u7684bucket Returns: list of buckets, each bucket is a dict with keys: ``` id orgID, a 16 bytes hex string type, system or user description name retentionRules createdAt updatedAt links labels ``` \"\"\" url = f \" { self . _url } /api/v2/buckets\" headers = { \"Authorization\" : f \"Token { self . _token } \" } async with ClientSession () as session : async with session . get ( url , headers = headers ) as resp : if resp . status != 200 : err = await resp . json () raise InfluxSchemaError ( f \"influxdb list bucket failed, status code: { err [ 'message' ] } \" ) else : return ( await resp . json ())[ \"buckets\" ] async def delete_bucket ( self , bucket_id : str = None ): \"\"\"\u5220\u9664influxdb\u4e2d\u6307\u5b9abucket Args: bucket_id: \u6307\u5b9abucket\u7684id\u3002\u5982\u679c\u4e3aNone\uff0c\u5219\u4f1a\u5220\u9664\u672cclient\u5bf9\u5e94\u7684bucket\u3002 \"\"\" if bucket_id is None : buckets = await self . list_buckets () for bucket in buckets : if bucket [ \"type\" ] == \"user\" and bucket [ \"name\" ] == self . _bucket : bucket_id = bucket [ \"id\" ] break else : raise BadParameterError ( \"bucket_id is None, and we can't find bucket with name: %s \" % self . _bucket ) url = f \" { self . _url } /api/v2/buckets/ { bucket_id } \" headers = { \"Authorization\" : f \"Token { self . _token } \" } async with ClientSession () as session : async with session . delete ( url , headers = headers ) as resp : if resp . status != 204 : err = await resp . json () logger . 
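`delete()` above posts against the `/api/v2/delete` endpoint and raises `InfluxDeleteError` on failure. A minimal sketch follows; the measurement name and tag filter are hypothetical, and omitting `start` means the deletion reaches back to `EPOCH_START`, as the docstring states.

```python
import asyncio
import datetime
from omicron.dal.influx.influxclient import InfluxClient

async def demo_delete():
    # Placeholder connection settings.
    client = InfluxClient("http://localhost:8086", "my-token", "my-bucket", org="my-org")

    # Remove every point tagged code=000001.XSHE written up to "now" from the
    # hypothetical measurement "stock_bars_1m"; with start omitted, the whole
    # history (from EPOCH_START) is covered.
    await client.delete(
        "stock_bars_1m",
        stop=datetime.datetime.now(),
        tags={"code": "000001.XSHE"},
    )

asyncio.run(demo_delete())
```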
warning ( \"influxdb delete bucket error: %s when processin command %s \" , err [ \"message\" ], bucket_id , ) raise InfluxSchemaError ( f \"influxdb delete bucket failed, status code: { err [ 'message' ] } \" ) async def create_bucket ( self , description = None , retention_rules : List [ Dict ] = None , org_id : str = None ) -> str : \"\"\"\u521b\u5efainfluxdb\u4e2d\u6307\u5b9abucket Args: description: \u6307\u5b9abucket\u7684\u63cf\u8ff0 org_id: \u6307\u5b9abucket\u6240\u5c5e\u7684\u7ec4\u7ec7id\uff0c\u5982\u679c\u672a\u6307\u5b9a\uff0c\u5219\u4f7f\u7528\u672cclient\u5bf9\u5e94\u7684\u7ec4\u7ec7id\u3002 Raises: InfluxSchemaError: \u5f53influxdb\u8fd4\u56de\u9519\u8bef\u65f6\uff0c\u6bd4\u5982\u91cd\u590d\u521b\u5efabucket\u7b49\uff0c\u4f1a\u629b\u51fa\u6b64\u5f02\u5e38 Returns: \u65b0\u521b\u5efa\u7684bucket\u7684id \"\"\" if org_id is None : org_id = await self . query_org_id () url = f \" { self . _url } /api/v2/buckets\" headers = { \"Authorization\" : f \"Token { self . _token } \" } data = { \"name\" : self . _bucket , \"orgID\" : org_id , \"description\" : description , \"retentionRules\" : retention_rules , } async with ClientSession () as session : async with session . post ( url , data = json . dumps ( data ), headers = headers ) as resp : if resp . status != 201 : err = await resp . json () logger . warning ( \"influxdb create bucket error: %s when processin command %s \" , err [ \"message\" ], data , ) raise InfluxSchemaError ( f \"influxdb create bucket failed, status code: { err [ 'message' ] } \" ) else : result = await resp . json () return result [ \"id\" ] async def list_organizations ( self , offset : int = 0 , limit : int = 100 ) -> List [ Dict ]: \"\"\"\u5217\u51fa\u672c\u5ba2\u6237\u7aef\u5141\u8bb8\u67e5\u8be2\u7684\u6240\u7ec4\u7ec7 Args: offset : \u5206\u9875\u8d77\u70b9 limit : \u6bcf\u9875size Raises: InfluxSchemaError: influxdb\u8fd4\u56de\u7684\u9519\u8bef Returns: list of organizations, each organization is a dict with keys: ``` id : the id of the org links name : the name of the org description createdAt updatedAt ``` \"\"\" url = f \" { self . _url } /api/v2/orgs?offset= { offset } &limit= { limit } \" headers = { \"Authorization\" : f \"Token { self . _token } \" } async with ClientSession () as session : async with session . get ( url , headers = headers ) as resp : if resp . status != 200 : err = await resp . json () logger . warning ( \"influxdb query orgs err: %s \" , err [ \"message\" ]) raise InfluxSchemaError ( f \"influxdb query orgs failed, status code: { err [ 'message' ] } \" ) else : return ( await resp . json ())[ \"orgs\" ] async def query_org_id ( self , name : str = None ) -> str : \"\"\"\u901a\u8fc7\u7ec4\u7ec7\u540d\u67e5\u627e\u7ec4\u7ec7id \u53ea\u80fd\u67e5\u7684\u672c\u5ba2\u6237\u7aef\u5141\u8bb8\u67e5\u8be2\u7684\u7ec4\u7ec7\u3002\u5982\u679cname\u672a\u63d0\u4f9b\uff0c\u5219\u4f7f\u7528\u672c\u5ba2\u6237\u7aef\u521b\u5efa\u65f6\u4f20\u5165\u7684\u7ec4\u7ec7\u540d\u3002 Args: name: \u6307\u5b9a\u7ec4\u7ec7\u540d Returns: \u7ec4\u7ec7id \"\"\" if name is None : name = self . _org orgs = await self . 
list_organizations () for org in orgs : if org [ \"name\" ] == name : return org [ \"id\" ] raise BadParameterError ( f \"can't find org with name: { name } \" )","title":"InfluxClient - the performanct async client for influxdb"},{"location":"api/dal/influxclient/#omicron.dal.influx.influxclient.InfluxClient.__init__","text":"[summary] Parameters: Name Type Description Default url [type] [description] required token [type] [description] required bucket [type] [description] required org [type] [description]. Defaults to None. None enable_compress [type] [description]. Defaults to False. False chunk_size int number of lines to be saved in one request 5000 precision str \u652f\u6301\u7684\u65f6\u95f4\u7cbe\u5ea6 's' Source code in omicron/dal/influx/influxclient.py def __init__ ( self , url : str , token : str , bucket : str , org : str = None , enable_compress = False , chunk_size : int = 5000 , precision : str = \"s\" , ): \"\"\"[summary] Args: url ([type]): [description] token ([type]): [description] bucket ([type]): [description] org ([type], optional): [description]. Defaults to None. enable_compress ([type], optional): [description]. Defaults to False. chunk_size: number of lines to be saved in one request precision: \u652f\u6301\u7684\u65f6\u95f4\u7cbe\u5ea6 \"\"\" self . _url = url self . _bucket = bucket self . _enable_compress = enable_compress self . _org = org self . _org_id = None # \u9700\u8981\u65f6\u901a\u8fc7\u67e5\u8be2\u83b7\u53d6\uff0c\u6b64\u540e\u4e0d\u518d\u66f4\u65b0 self . _token = token # influxdb 2.0\u8d77\u652f\u6301\u7684\u65f6\u95f4\u7cbe\u5ea6\u6709\uff1ans, us, ms, s\u3002\u672c\u5ba2\u6237\u7aef\u53ea\u652f\u6301s, ms\u548cus self . _precision = precision . lower () if self . _precision not in [ \"s\" , \"ms\" , \"us\" ]: # pragma: no cover raise ValueError ( \"precision must be one of ['s', 'ms', 'us']\" ) self . _chunk_size = chunk_size # write self . _write_url = f \" { self . _url } /api/v2/write?org= { self . _org } &bucket= { self . _bucket } &precision= { self . _precision } \" self . _write_headers = { \"Content-Type\" : \"text/plain; charset=utf-8\" , \"Authorization\" : f \"Token { token } \" , \"Accept\" : \"application/json\" , } if self . _enable_compress : self . _write_headers [ \"Content-Encoding\" ] = \"gzip\" self . _query_url = f \" { self . _url } /api/v2/query?org= { self . _org } \" self . _query_headers = { \"Authorization\" : f \"Token { token } \" , \"Content-Type\" : \"application/vnd.flux\" , # influx\u67e5\u8be2\u7ed3\u679c\u683c\u5f0f\uff0c\u65e0\u8bba\u5982\u4f55\u6307\u5b9a\uff08\u6216\u8005\u4e0d\u6307\u5b9a\uff09\uff0c\u57282.1\u4e2d\u59cb\u7ec8\u662fcsv\u683c\u5f0f \"Accept\" : \"text/csv\" , } if self . _enable_compress : self . _query_headers [ \"Accept-Encoding\" ] = \"gzip\" self . _delete_url = ( f \" { self . _url } /api/v2/delete?org= { self . _org } &bucket= { self . _bucket } \" ) self . 
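All constructor options are listed in the parameter table above; the sketch below simply spells them out with placeholder credentials, using only the arguments shown in the documented signature.

```python
from omicron.dal.influx.influxclient import InfluxClient

# Placeholder credentials; the keyword arguments mirror the parameter table above.
client = InfluxClient(
    url="http://localhost:8086",
    token="my-token",
    bucket="my-bucket",
    org="my-org",
    enable_compress=True,  # gzip write bodies and accept gzip-encoded query results
    chunk_size=5000,       # lines per request when save() splits the data
    precision="ms",        # must be one of "s", "ms", "us"
)
```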
_delete_headers = { \"Authorization\" : f \"Token { token } \" , \"Content-Type\" : \"application/json\" , }","title":"__init__()"},{"location":"api/dal/influxclient/#omicron.dal.influx.influxclient.InfluxClient.create_bucket","text":"\u521b\u5efainfluxdb\u4e2d\u6307\u5b9abucket Parameters: Name Type Description Default description \u6307\u5b9abucket\u7684\u63cf\u8ff0 None org_id str \u6307\u5b9abucket\u6240\u5c5e\u7684\u7ec4\u7ec7id\uff0c\u5982\u679c\u672a\u6307\u5b9a\uff0c\u5219\u4f7f\u7528\u672cclient\u5bf9\u5e94\u7684\u7ec4\u7ec7id\u3002 None Exceptions: Type Description InfluxSchemaError \u5f53influxdb\u8fd4\u56de\u9519\u8bef\u65f6\uff0c\u6bd4\u5982\u91cd\u590d\u521b\u5efabucket\u7b49\uff0c\u4f1a\u629b\u51fa\u6b64\u5f02\u5e38 Returns: Type Description str \u65b0\u521b\u5efa\u7684bucket\u7684id Source code in omicron/dal/influx/influxclient.py async def create_bucket ( self , description = None , retention_rules : List [ Dict ] = None , org_id : str = None ) -> str : \"\"\"\u521b\u5efainfluxdb\u4e2d\u6307\u5b9abucket Args: description: \u6307\u5b9abucket\u7684\u63cf\u8ff0 org_id: \u6307\u5b9abucket\u6240\u5c5e\u7684\u7ec4\u7ec7id\uff0c\u5982\u679c\u672a\u6307\u5b9a\uff0c\u5219\u4f7f\u7528\u672cclient\u5bf9\u5e94\u7684\u7ec4\u7ec7id\u3002 Raises: InfluxSchemaError: \u5f53influxdb\u8fd4\u56de\u9519\u8bef\u65f6\uff0c\u6bd4\u5982\u91cd\u590d\u521b\u5efabucket\u7b49\uff0c\u4f1a\u629b\u51fa\u6b64\u5f02\u5e38 Returns: \u65b0\u521b\u5efa\u7684bucket\u7684id \"\"\" if org_id is None : org_id = await self . query_org_id () url = f \" { self . _url } /api/v2/buckets\" headers = { \"Authorization\" : f \"Token { self . _token } \" } data = { \"name\" : self . _bucket , \"orgID\" : org_id , \"description\" : description , \"retentionRules\" : retention_rules , } async with ClientSession () as session : async with session . post ( url , data = json . dumps ( data ), headers = headers ) as resp : if resp . status != 201 : err = await resp . json () logger . warning ( \"influxdb create bucket error: %s when processin command %s \" , err [ \"message\" ], data , ) raise InfluxSchemaError ( f \"influxdb create bucket failed, status code: { err [ 'message' ] } \" ) else : result = await resp . json () return result [ \"id\" ]","title":"create_bucket()"},{"location":"api/dal/influxclient/#omicron.dal.influx.influxclient.InfluxClient.delete","text":"\u5220\u9664influxdb\u4e2d\u6307\u5b9a\u65f6\u95f4\u6bb5\u5185\u7684\u6570\u636e \u5173\u4e8e\u53c2\u6570\uff0c\u8bf7\u53c2\u89c1 Flux.delete \u3002 Parameters: Name Type Description Default measurement str \u6307\u5b9ameasurement\u540d\u5b57 required stop datetime \u5f85\u5220\u9664\u8bb0\u5f55\u7684\u7ed3\u675f\u65f6\u95f4 required start datetime \u5f85\u5220\u9664\u8bb0\u5f55\u7684\u5f00\u59cb\u65f6\u95f4\uff0c\u5982\u679c\u672a\u6307\u5b9a\uff0c\u5219\u4f7f\u7528EPOCH_START None tags Optional[Dict[str, str]] \u6309tag\u8fdb\u884c\u8fc7\u6ee4\u7684\u6761\u4ef6 {} precision str \u7528\u4ee5\u683c\u5f0f\u5316\u8d77\u59cb\u548c\u7ed3\u675f\u65f6\u95f4\u3002 's' Exceptions: Type Description InfluxDeleteError \u5982\u679c\u5220\u9664\u5931\u8d25\uff0c\u5219\u629b\u51fa\u6b64\u5f02\u5e38 Source code in omicron/dal/influx/influxclient.py async def delete ( self , measurement : str , stop : datetime . datetime , tags : Optional [ Dict [ str , str ]] = {}, start : datetime . 
datetime = None , precision : str = \"s\" , ): \"\"\"\u5220\u9664influxdb\u4e2d\u6307\u5b9a\u65f6\u95f4\u6bb5\u5185\u7684\u6570\u636e \u5173\u4e8e\u53c2\u6570\uff0c\u8bf7\u53c2\u89c1[Flux.delete][omicron.dal.influx.flux.Flux.delete]\u3002 Args: measurement: \u6307\u5b9ameasurement\u540d\u5b57 stop: \u5f85\u5220\u9664\u8bb0\u5f55\u7684\u7ed3\u675f\u65f6\u95f4 start: \u5f85\u5220\u9664\u8bb0\u5f55\u7684\u5f00\u59cb\u65f6\u95f4\uff0c\u5982\u679c\u672a\u6307\u5b9a\uff0c\u5219\u4f7f\u7528EPOCH_START tags: \u6309tag\u8fdb\u884c\u8fc7\u6ee4\u7684\u6761\u4ef6 precision: \u7528\u4ee5\u683c\u5f0f\u5316\u8d77\u59cb\u548c\u7ed3\u675f\u65f6\u95f4\u3002 Raises: InfluxDeleteError: \u5982\u679c\u5220\u9664\u5931\u8d25\uff0c\u5219\u629b\u51fa\u6b64\u5f02\u5e38 \"\"\" # todo: add raise error declaration command = Flux () . delete ( measurement , stop , tags , start = start , precision = precision ) async with ClientSession () as session : async with session . post ( self . _delete_url , data = json . dumps ( command ), headers = self . _delete_headers ) as resp : if resp . status != 204 : err = await resp . json () logger . warning ( \"influxdb delete error: %s when processin command %s \" , err [ \"message\" ], command , ) raise InfluxDeleteError ( f \"influxdb delete failed, status code: { err [ 'message' ] } \" )","title":"delete()"},{"location":"api/dal/influxclient/#omicron.dal.influx.influxclient.InfluxClient.delete_bucket","text":"\u5220\u9664influxdb\u4e2d\u6307\u5b9abucket Parameters: Name Type Description Default bucket_id str \u6307\u5b9abucket\u7684id\u3002\u5982\u679c\u4e3aNone\uff0c\u5219\u4f1a\u5220\u9664\u672cclient\u5bf9\u5e94\u7684bucket\u3002 None Source code in omicron/dal/influx/influxclient.py async def delete_bucket ( self , bucket_id : str = None ): \"\"\"\u5220\u9664influxdb\u4e2d\u6307\u5b9abucket Args: bucket_id: \u6307\u5b9abucket\u7684id\u3002\u5982\u679c\u4e3aNone\uff0c\u5219\u4f1a\u5220\u9664\u672cclient\u5bf9\u5e94\u7684bucket\u3002 \"\"\" if bucket_id is None : buckets = await self . list_buckets () for bucket in buckets : if bucket [ \"type\" ] == \"user\" and bucket [ \"name\" ] == self . _bucket : bucket_id = bucket [ \"id\" ] break else : raise BadParameterError ( \"bucket_id is None, and we can't find bucket with name: %s \" % self . _bucket ) url = f \" { self . _url } /api/v2/buckets/ { bucket_id } \" headers = { \"Authorization\" : f \"Token { self . _token } \" } async with ClientSession () as session : async with session . delete ( url , headers = headers ) as resp : if resp . status != 204 : err = await resp . json () logger . warning ( \"influxdb delete bucket error: %s when processin command %s \" , err [ \"message\" ], bucket_id , ) raise InfluxSchemaError ( f \"influxdb delete bucket failed, status code: { err [ 'message' ] } \" )","title":"delete_bucket()"},{"location":"api/dal/influxclient/#omicron.dal.influx.influxclient.InfluxClient.drop_measurement","text":"\u4eceinfluxdb\u4e2d\u5220\u9664\u4e00\u4e2ameasurement \u8c03\u7528\u6b64\u65b9\u6cd5\u540e\uff0c\u5b9e\u9645\u4e0a\u8be5measurement\u4ecd\u7136\u5b58\u5728\uff0c\u53ea\u662f\u6ca1\u6709\u6570\u636e\u3002 Source code in omicron/dal/influx/influxclient.py async def drop_measurement ( self , measurement : str ): \"\"\"\u4eceinfluxdb\u4e2d\u5220\u9664\u4e00\u4e2ameasurement \u8c03\u7528\u6b64\u65b9\u6cd5\u540e\uff0c\u5b9e\u9645\u4e0a\u8be5measurement\u4ecd\u7136\u5b58\u5728\uff0c\u53ea\u662f\u6ca1\u6709\u6570\u636e\u3002 \"\"\" # todo: add raise error declaration await self . 
delete ( measurement , arrow . now () . naive )","title":"drop_measurement()"},{"location":"api/dal/influxclient/#omicron.dal.influx.influxclient.InfluxClient.list_buckets","text":"\u5217\u51fainfluxdb\u4e2d\u5bf9\u5e94token\u80fd\u770b\u5230\u7684\u6240\u6709\u7684bucket Returns: Type Description list of buckets, each bucket is a dict with keys ``` id orgID, a 16 bytes hex string type, system or user description name retentionRules createdAt updatedAt links labels ``` Source code in omicron/dal/influx/influxclient.py async def list_buckets ( self ) -> List [ Dict ]: \"\"\"\u5217\u51fainfluxdb\u4e2d\u5bf9\u5e94token\u80fd\u770b\u5230\u7684\u6240\u6709\u7684bucket Returns: list of buckets, each bucket is a dict with keys: ``` id orgID, a 16 bytes hex string type, system or user description name retentionRules createdAt updatedAt links labels ``` \"\"\" url = f \" { self . _url } /api/v2/buckets\" headers = { \"Authorization\" : f \"Token { self . _token } \" } async with ClientSession () as session : async with session . get ( url , headers = headers ) as resp : if resp . status != 200 : err = await resp . json () raise InfluxSchemaError ( f \"influxdb list bucket failed, status code: { err [ 'message' ] } \" ) else : return ( await resp . json ())[ \"buckets\" ]","title":"list_buckets()"},{"location":"api/dal/influxclient/#omicron.dal.influx.influxclient.InfluxClient.list_organizations","text":"\u5217\u51fa\u672c\u5ba2\u6237\u7aef\u5141\u8bb8\u67e5\u8be2\u7684\u6240\u7ec4\u7ec7 Parameters: Name Type Description Default offset \u5206\u9875\u8d77\u70b9 0 limit \u6bcf\u9875size 100 Exceptions: Type Description InfluxSchemaError influxdb\u8fd4\u56de\u7684\u9519\u8bef Returns: Type Description list of organizations, each organization is a dict with keys 1 2 3 4 5 6 id : the id of the org links name : the name of the org description createdAt updatedAt Source code in omicron/dal/influx/influxclient.py async def list_organizations ( self , offset : int = 0 , limit : int = 100 ) -> List [ Dict ]: \"\"\"\u5217\u51fa\u672c\u5ba2\u6237\u7aef\u5141\u8bb8\u67e5\u8be2\u7684\u6240\u7ec4\u7ec7 Args: offset : \u5206\u9875\u8d77\u70b9 limit : \u6bcf\u9875size Raises: InfluxSchemaError: influxdb\u8fd4\u56de\u7684\u9519\u8bef Returns: list of organizations, each organization is a dict with keys: ``` id : the id of the org links name : the name of the org description createdAt updatedAt ``` \"\"\" url = f \" { self . _url } /api/v2/orgs?offset= { offset } &limit= { limit } \" headers = { \"Authorization\" : f \"Token { self . _token } \" } async with ClientSession () as session : async with session . get ( url , headers = headers ) as resp : if resp . status != 200 : err = await resp . json () logger . warning ( \"influxdb query orgs err: %s \" , err [ \"message\" ]) raise InfluxSchemaError ( f \"influxdb query orgs failed, status code: { err [ 'message' ] } \" ) else : return ( await resp . 
json ())[ \"orgs\" ]","title":"list_organizations()"},{"location":"api/dal/influxclient/#omicron.dal.influx.influxclient.InfluxClient.query","text":"flux\u67e5\u8be2 flux\u67e5\u8be2\u7ed3\u679c\u662f\u4e00\u4e2a\u4ee5annotated csv\u683c\u5f0f\u5b58\u50a8\u7684\u6570\u636e\uff0c\u4f8b\u5982\uff1a 1 2 ,result,table,_time,code,amount,close,factor,high,low,open,volume ,_result,0,2019-01-01T00:00:00Z,000001.XSHE,100000000,5.15,1.23,5.2,5,5.1,1000000 \u4e0a\u8ff0 result \u4e2d\uff0c\u4e8b\u5148\u901a\u8fc7Flux.keep()\u9650\u5236\u4e86\u8fd4\u56de\u7684\u5b57\u6bb5\u4e3a_time,code,amount,close,factor,high,low,open,volume\u3002influxdb\u67e5\u8be2\u8fd4\u56de\u7ed3\u679c\u65f6\uff0c\u603b\u662f\u6309\u7167\u5b57\u6bb5\u540d\u79f0\u5347\u5e8f\u6392\u5217\u3002\u6b64\u5916\uff0c\u603b\u662f\u4f1a\u989d\u5916\u5730\u8fd4\u56de_result, table\u4e24\u4e2a\u5b57\u6bb5\u3002 \u5982\u679c\u4f20\u5165\u4e86deserializer\uff0c\u5219\u4f1a\u8c03\u7528deserializer\u5c06\u5176\u89e3\u6790\u6210\u4e3apython\u5bf9\u8c61\u3002\u5426\u5219\uff0c\u8fd4\u56debytes\u6570\u636e\u3002 Parameters: Name Type Description Default flux Union[omicron.dal.influx.flux.Flux, str] flux\u67e5\u8be2\u8bed\u53e5 required deserializer Callable \u53cd\u5e8f\u5217\u5316\u51fd\u6570 None Returns: Type Description Any \u5982\u679c\u672a\u63d0\u4f9b\u53cd\u5e8f\u5217\u5316\u51fd\u6570\uff0c\u5219\u8fd4\u56de\u7ed3\u679c\u4e3abytes array(\u5982\u679c\u6307\u5b9a\u4e86compress=True\uff0c\u8fd4\u56de\u7ed3\u679c\u4e3agzip\u89e3\u538b\u7f29\u540e\u7684bytes array)\uff0c\u5426\u5219\u8fd4\u56de\u53cd\u5e8f\u5217\u5316\u540e\u7684python\u5bf9\u8c61 Source code in omicron/dal/influx/influxclient.py async def query ( self , flux : Union [ Flux , str ], deserializer : Callable = None ) -> Any : \"\"\"flux\u67e5\u8be2 flux\u67e5\u8be2\u7ed3\u679c\u662f\u4e00\u4e2a\u4ee5annotated csv\u683c\u5f0f\u5b58\u50a8\u7684\u6570\u636e\uff0c\u4f8b\u5982\uff1a ``` ,result,table,_time,code,amount,close,factor,high,low,open,volume ,_result,0,2019-01-01T00:00:00Z,000001.XSHE,100000000,5.15,1.23,5.2,5,5.1,1000000 ``` \u4e0a\u8ff0`result`\u4e2d\uff0c\u4e8b\u5148\u901a\u8fc7Flux.keep()\u9650\u5236\u4e86\u8fd4\u56de\u7684\u5b57\u6bb5\u4e3a_time,code,amount,close,factor,high,low,open,volume\u3002influxdb\u67e5\u8be2\u8fd4\u56de\u7ed3\u679c\u65f6\uff0c\u603b\u662f\u6309\u7167\u5b57\u6bb5\u540d\u79f0\u5347\u5e8f\u6392\u5217\u3002\u6b64\u5916\uff0c\u603b\u662f\u4f1a\u989d\u5916\u5730\u8fd4\u56de_result, table\u4e24\u4e2a\u5b57\u6bb5\u3002 \u5982\u679c\u4f20\u5165\u4e86deserializer\uff0c\u5219\u4f1a\u8c03\u7528deserializer\u5c06\u5176\u89e3\u6790\u6210\u4e3apython\u5bf9\u8c61\u3002\u5426\u5219\uff0c\u8fd4\u56debytes\u6570\u636e\u3002 Args: flux: flux\u67e5\u8be2\u8bed\u53e5 deserializer: \u53cd\u5e8f\u5217\u5316\u51fd\u6570 Returns: \u5982\u679c\u672a\u63d0\u4f9b\u53cd\u5e8f\u5217\u5316\u51fd\u6570\uff0c\u5219\u8fd4\u56de\u7ed3\u679c\u4e3abytes array(\u5982\u679c\u6307\u5b9a\u4e86compress=True\uff0c\u8fd4\u56de\u7ed3\u679c\u4e3agzip\u89e3\u538b\u7f29\u540e\u7684bytes array)\uff0c\u5426\u5219\u8fd4\u56de\u53cd\u5e8f\u5217\u5316\u540e\u7684python\u5bf9\u8c61 \"\"\" if isinstance ( flux , Flux ): flux = str ( flux ) async with ClientSession () as session : async with session . post ( self . _query_url , data = flux , headers = self . _query_headers ) as resp : if resp . status != 200 : err = await resp . json () logger . warning ( f \"influxdb query error: { err } when processing { flux [: 500 ] } \" ) logger . 
debug ( \"data caused error: %s \" , flux ) raise InfluxDBQueryError ( f \"influxdb query failed, status code: { err [ 'message' ] } \" ) else : # auto-unzip body = await resp . read () if deserializer : try : return deserializer ( body ) except Exception as e : logger . exception ( e ) logger . warning ( \"failed to deserialize data: %s , the query is: %s \" , body , flux [: 500 ], ) raise else : return body","title":"query()"},{"location":"api/dal/influxclient/#omicron.dal.influx.influxclient.InfluxClient.query_org_id","text":"\u901a\u8fc7\u7ec4\u7ec7\u540d\u67e5\u627e\u7ec4\u7ec7id \u53ea\u80fd\u67e5\u7684\u672c\u5ba2\u6237\u7aef\u5141\u8bb8\u67e5\u8be2\u7684\u7ec4\u7ec7\u3002\u5982\u679cname\u672a\u63d0\u4f9b\uff0c\u5219\u4f7f\u7528\u672c\u5ba2\u6237\u7aef\u521b\u5efa\u65f6\u4f20\u5165\u7684\u7ec4\u7ec7\u540d\u3002 Parameters: Name Type Description Default name str \u6307\u5b9a\u7ec4\u7ec7\u540d None Returns: Type Description str \u7ec4\u7ec7id Source code in omicron/dal/influx/influxclient.py async def query_org_id ( self , name : str = None ) -> str : \"\"\"\u901a\u8fc7\u7ec4\u7ec7\u540d\u67e5\u627e\u7ec4\u7ec7id \u53ea\u80fd\u67e5\u7684\u672c\u5ba2\u6237\u7aef\u5141\u8bb8\u67e5\u8be2\u7684\u7ec4\u7ec7\u3002\u5982\u679cname\u672a\u63d0\u4f9b\uff0c\u5219\u4f7f\u7528\u672c\u5ba2\u6237\u7aef\u521b\u5efa\u65f6\u4f20\u5165\u7684\u7ec4\u7ec7\u540d\u3002 Args: name: \u6307\u5b9a\u7ec4\u7ec7\u540d Returns: \u7ec4\u7ec7id \"\"\" if name is None : name = self . _org orgs = await self . list_organizations () for org in orgs : if org [ \"name\" ] == name : return org [ \"id\" ] raise BadParameterError ( f \"can't find org with name: { name } \" )","title":"query_org_id()"},{"location":"api/dal/influxclient/#omicron.dal.influx.influxclient.InfluxClient.save","text":"save data into influxdb if data is a pandas.DataFrame or numy structured array, it will be converted to line protocol and saved. If data is str, use write method instead. Parameters: Name Type Description Default data Union[numpy.ndarray, pandas.core.frame.DataFrame] data to be saved required measurement str the name of measurement None tag_keys List[str] which columns name will be used as tags [] chunk_size int number of lines to be saved in one request. if it's -1, then all data will be written in one request. If it's None, then it will be set to self._chunk_size None Exceptions: Type Description InfluxDBWriteError if write failed Source code in omicron/dal/influx/influxclient.py async def save ( self , data : Union [ np . ndarray , DataFrame ], measurement : str = None , tag_keys : List [ str ] = [], time_key : str = None , global_tags : Dict = {}, chunk_size : int = None , ) -> None : \"\"\"save `data` into influxdb if `data` is a pandas.DataFrame or numy structured array, it will be converted to line protocol and saved. If `data` is str, use `write` method instead. Args: data: data to be saved measurement: the name of measurement tag_keys: which columns name will be used as tags chunk_size: number of lines to be saved in one request. if it's -1, then all data will be written in one request. If it's None, then it will be set to `self._chunk_size` Raises: InfluxDBWriteError: if write failed \"\"\" # todo: add more errors raise if isinstance ( data , DataFrame ): assert ( measurement is not None ), \"measurement must be specified when data is a DataFrame\" if tag_keys : assert set ( tag_keys ) in set ( data . columns . 
tolist () ), \"tag_keys must be in data.columns\" serializer = DataframeSerializer ( data , measurement , time_key , tag_keys , global_tags , precision = self . _precision , ) if chunk_size == - 1 : chunk_size = len ( data ) for lines in serializer . serialize ( chunk_size or self . _chunk_size ): await self . write ( lines ) elif isinstance ( data , np . ndarray ): assert ( measurement is not None ), \"measurement must be specified when data is a numpy array\" assert ( time_key is not None ), \"time_key must be specified when data is a numpy array\" serializer = NumpySerializer ( data , measurement , time_key , tag_keys , global_tags , time_precision = self . _precision , ) if chunk_size == - 1 : chunk_size = len ( data ) for lines in serializer . serialize ( chunk_size or self . _chunk_size ): await self . write ( lines ) else : raise TypeError ( f \"data must be pandas.DataFrame, numpy array, got { type ( data ) } \" )","title":"save()"},{"location":"api/dal/influxclient/#omicron.dal.influx.influxclient.InfluxClient.write","text":"\u5c06line-protocol\u6570\u7ec4\u5199\u5165influxdb Parameters: Name Type Description Default line_protocol str \u5f85\u5199\u5165\u7684\u6570\u636e\uff0c\u4ee5line-protocol\u6570\u7ec4\u5f62\u5f0f\u5b58\u5728 required Source code in omicron/dal/influx/influxclient.py async def write ( self , line_protocol : str ): \"\"\"\u5c06line-protocol\u6570\u7ec4\u5199\u5165influxdb Args: line_protocol: \u5f85\u5199\u5165\u7684\u6570\u636e\uff0c\u4ee5line-protocol\u6570\u7ec4\u5f62\u5f0f\u5b58\u5728 \"\"\" # todo: add raise error declaration if self . _enable_compress : line_protocol_ = gzip . compress ( line_protocol . encode ( \"utf-8\" )) else : line_protocol_ = line_protocol async with ClientSession () as session : async with session . post ( self . _write_url , data = line_protocol_ , headers = self . _write_headers ) as resp : if resp . status != 204 : err = await resp . json () logger . warning ( \"influxdb write error when processing: %s , err code: %s , message: %s \" , { line_protocol [: 100 ]}, err [ \"code\" ], err [ \"message\" ], ) logger . debug ( \"data caused error: %s \" , line_protocol ) raise InfluxDBWriteError ( f \"influxdb write failed, err: { err [ 'message' ] } \" )","title":"write()"},{"location":"api/dal/serialize/","text":"Serializer and Deserializer \u00b6 DataFrameDeserializer \u00b6 Source code in omicron/dal/influx/serialize.py class DataframeDeserializer ( Serializer ): def __init__ ( self , sort_values : Union [ str , List [ str ]] = None , encoding : str = \"utf-8\" , names : List [ str ] = None , usecols : Union [ List [ int ], List [ str ]] = None , dtype : dict = None , time_col : Union [ int , str ] = None , sep : str = \",\" , header : Union [ int , List [ int ], str ] = \"infer\" , engine : str = None , infer_datetime_format = True , lineterminator : str = None , converters : dict = None , skipfooter = 0 , index_col : Union [ int , str , List [ int ], List [ str ], bool ] = None , skiprows : Union [ int , List [ int ], Callable ] = None , ** kwargs , ): \"\"\"constructor a deserializer which convert a csv-like bytes array to pandas.DataFrame the args are the same as pandas.read_csv. for details, please refer to the official doc: [pandas.read_csv](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.read_csv.html) for performance consideration, please specify the following args: - engine = 'c' or 'pyarrow' when possible. Be noticed that 'pyarrow' is the fastest (multi-threaded supported) but may be error-prone. 
Only use it when you have thoroughly tested. - specify dtype when possible use `usecols` to specify the columns to read, and `names` to specify the column names (i.e., rename the columns), otherwise, the column names will be inferred from the first line. when `names` is specified, it has to be as same length as actual columns of the data. If this causes column renaming, then you should always use column name specified in `names` to access the data (instead of which in `usecols`). Examples: >>> data = \",result,table,_time,code,name\\\\r\\\\n,_result,0,2019-01-01T09:31:00Z,000002.XSHE,\u56fd\u8054\u8bc1\u5238\" >>> des = DataframeDeserializer(names=[\"_\", \"result\", \"table\", \"frame\", \"code\", \"name\"], usecols=[\"frame\", \"code\", \"name\"]) >>> des(data) frame code name 0 2019-01-01T09:31:00Z 000002.XSHE \u56fd\u8054\u8bc1\u5238 Args: sort_values: sort the dataframe by the specified columns encoding: if the data is bytes, then encoding is required, due to pandas.read_csv only handle string array sep: the separator/delimiter of each fields header: the row number of the header, default is 'infer' names: the column names of the dataframe index_col: the column number or name of the index column usecols: the column name of the columns to use dtype: the dtype of the columns engine: the engine of the csv file, default is None converters: specify converter for columns. skiprows: the row number to skip skipfooter: the row number to skip at the end of the file time_col: the columns to parse as dates infer_datetime_format: whether to infer the datetime format lineterminator: the line terminator of the csv file, only valid when engine is 'c' kwargs: other arguments \"\"\" self . sort_values = sort_values self . encoding = encoding self . sep = sep self . header = header self . names = names self . index_col = index_col self . usecols = usecols self . dtype = dtype self . engine = engine self . converters = converters or {} self . skiprows = skiprows self . skipfooter = skipfooter self . infer_datetime_format = infer_datetime_format self . lineterminator = lineterminator self . kwargs = kwargs if names is not None : self . header = 0 if time_col is not None : self . converters [ time_col ] = lambda x : ciso8601 . parse_datetime_as_naive ( x ) def __call__ ( self , data : Union [ str , bytes ]) -> pd . DataFrame : if isinstance ( data , str ): # treat data as string stream = io . StringIO ( data ) else : stream = io . StringIO ( data . decode ( self . encoding )) df = pd . read_csv ( stream , sep = self . sep , header = self . header , names = self . names , index_col = self . index_col , usecols = self . usecols , dtype = self . dtype , engine = self . engine , converters = self . converters , skiprows = self . skiprows , skipfooter = self . skipfooter , infer_datetime_format = self . infer_datetime_format , lineterminator = self . lineterminator , ** self . kwargs , ) if self . usecols : df = df [ list ( self . usecols )] if self . sort_values is not None : return df . sort_values ( self . sort_values ) else : return df __init__ ( self , sort_values = None , encoding = 'utf-8' , names = None , usecols = None , dtype = None , time_col = None , sep = ',' , header = 'infer' , engine = None , infer_datetime_format = True , lineterminator = None , converters = None , skipfooter = 0 , index_col = None , skiprows = None , ** kwargs ) special \u00b6 constructor a deserializer which convert a csv-like bytes array to pandas.DataFrame the args are the same as pandas.read_csv. 
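Tying the performance advice together (prefer the C engine, pin `dtype`, and use `usecols`/`names` to select and rename columns), here is a small sketch built on the docstring's own sample payload; the dtype mapping and the choice of `engine="c"` are illustrative, not prescribed by the library.

```python
from omicron.dal.influx.serialize import DataframeDeserializer

# The sample annotated-CSV payload from the docstring's Examples section.
data = (
    ",result,table,_time,code,name\r\n"
    ",_result,0,2019-01-01T09:31:00Z,000002.XSHE,国联证券"
)

# Rename all six raw columns via `names`, keep three of them via `usecols`,
# parse "frame" as datetimes, and pin dtypes plus the C engine for speed.
des = DataframeDeserializer(
    names=["_", "result", "table", "frame", "code", "name"],
    usecols=["frame", "code", "name"],
    time_col="frame",
    dtype={"code": "string", "name": "string"},
    engine="c",
)
print(des(data))
```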
for details, please refer to the official doc: pandas.read_csv for performance consideration, please specify the following args: - engine = 'c' or 'pyarrow' when possible. Be noticed that 'pyarrow' is the fastest (multi-threaded supported) but may be error-prone. Only use it when you have thoroughly tested. 1 - specify dtype when possible use usecols to specify the columns to read, and names to specify the column names (i.e., rename the columns), otherwise, the column names will be inferred from the first line. when names is specified, it has to be as same length as actual columns of the data. If this causes column renaming, then you should always use column name specified in names to access the data (instead of which in usecols ). Examples: >>> data = \",result,table,_time,code,name \\r\\n ,_result,0,2019-01-01T09:31:00Z,000002.XSHE,\u56fd\u8054\u8bc1\u5238\" >>> des = DataframeDeserializer ( names = [ \"_\" , \"result\" , \"table\" , \"frame\" , \"code\" , \"name\" ], usecols = [ \"frame\" , \"code\" , \"name\" ]) >>> des ( data ) frame code name 0 2019 - 01 - 01 T09 : 31 : 00 Z 000002. XSHE \u56fd\u8054\u8bc1\u5238 Parameters: Name Type Description Default sort_values Union[str, List[str]] sort the dataframe by the specified columns None encoding str if the data is bytes, then encoding is required, due to pandas.read_csv only handle string array 'utf-8' sep str the separator/delimiter of each fields ',' header Union[int, List[int], str] the row number of the header, default is 'infer' 'infer' names List[str] the column names of the dataframe None index_col Union[int, str, List[int], List[str], bool] the column number or name of the index column None usecols Union[List[int], List[str]] the column name of the columns to use None dtype dict the dtype of the columns None engine str the engine of the csv file, default is None None converters dict specify converter for columns. None skiprows Union[int, List[int], Callable] the row number to skip None skipfooter the row number to skip at the end of the file 0 time_col Union[int, str] the columns to parse as dates None infer_datetime_format whether to infer the datetime format True lineterminator str the line terminator of the csv file, only valid when engine is 'c' None kwargs other arguments {} Source code in omicron/dal/influx/serialize.py def __init__ ( self , sort_values : Union [ str , List [ str ]] = None , encoding : str = \"utf-8\" , names : List [ str ] = None , usecols : Union [ List [ int ], List [ str ]] = None , dtype : dict = None , time_col : Union [ int , str ] = None , sep : str = \",\" , header : Union [ int , List [ int ], str ] = \"infer\" , engine : str = None , infer_datetime_format = True , lineterminator : str = None , converters : dict = None , skipfooter = 0 , index_col : Union [ int , str , List [ int ], List [ str ], bool ] = None , skiprows : Union [ int , List [ int ], Callable ] = None , ** kwargs , ): \"\"\"constructor a deserializer which convert a csv-like bytes array to pandas.DataFrame the args are the same as pandas.read_csv. for details, please refer to the official doc: [pandas.read_csv](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.read_csv.html) for performance consideration, please specify the following args: - engine = 'c' or 'pyarrow' when possible. Be noticed that 'pyarrow' is the fastest (multi-threaded supported) but may be error-prone. Only use it when you have thoroughly tested. 
- specify dtype when possible use `usecols` to specify the columns to read, and `names` to specify the column names (i.e., rename the columns), otherwise, the column names will be inferred from the first line. when `names` is specified, it has to be as same length as actual columns of the data. If this causes column renaming, then you should always use column name specified in `names` to access the data (instead of which in `usecols`). Examples: >>> data = \",result,table,_time,code,name\\\\r\\\\n,_result,0,2019-01-01T09:31:00Z,000002.XSHE,\u56fd\u8054\u8bc1\u5238\" >>> des = DataframeDeserializer(names=[\"_\", \"result\", \"table\", \"frame\", \"code\", \"name\"], usecols=[\"frame\", \"code\", \"name\"]) >>> des(data) frame code name 0 2019-01-01T09:31:00Z 000002.XSHE \u56fd\u8054\u8bc1\u5238 Args: sort_values: sort the dataframe by the specified columns encoding: if the data is bytes, then encoding is required, due to pandas.read_csv only handle string array sep: the separator/delimiter of each fields header: the row number of the header, default is 'infer' names: the column names of the dataframe index_col: the column number or name of the index column usecols: the column name of the columns to use dtype: the dtype of the columns engine: the engine of the csv file, default is None converters: specify converter for columns. skiprows: the row number to skip skipfooter: the row number to skip at the end of the file time_col: the columns to parse as dates infer_datetime_format: whether to infer the datetime format lineterminator: the line terminator of the csv file, only valid when engine is 'c' kwargs: other arguments \"\"\" self . sort_values = sort_values self . encoding = encoding self . sep = sep self . header = header self . names = names self . index_col = index_col self . usecols = usecols self . dtype = dtype self . engine = engine self . converters = converters or {} self . skiprows = skiprows self . skipfooter = skipfooter self . infer_datetime_format = infer_datetime_format self . lineterminator = lineterminator self . kwargs = kwargs if names is not None : self . header = 0 if time_col is not None : self . converters [ time_col ] = lambda x : ciso8601 . parse_datetime_as_naive ( x ) NumpyDeserializer \u00b6 Source code in omicron/dal/influx/serialize.py class NumpyDeserializer ( Serializer ): def __init__ ( self , dtype : List [ tuple ] = \"float\" , sort_values : Union [ str , List [ str ]] = None , use_cols : Union [ List [ str ], List [ int ]] = None , parse_date : Union [ int , str ] = \"_time\" , sep : str = \",\" , encoding : str = \"utf-8\" , skip_rows : Union [ int , List [ int ]] = 1 , header_line : int = 1 , comments : str = \"#\" , converters : Mapping [ int , Callable ] = None , ): \"\"\"construct a deserializer, which will convert a csv like multiline string/bytes array to a numpy array the data to be deserialized will be first split into array of fields, then use use_cols to select which fields to use, and re-order them by the order of use_cols. After that, the fields will be converted to numpy array and converted into dtype. by default dtype is float, which means the data will be converted to float. If you need to convert to a numpy structured array, then you can specify the dtype as a list of tuples, e.g. ``` dtype = [('col_1', 'datetime64[s]'), ('col_2', ' 1 : assert all ( [ isinstance ( x , int ) for x in self . converters . keys ()] ), \"converters must be a dict of column index to converter function, if there's no header\" self . 
_parsed_headers = None def _parse_header_once ( self , stream ): \"\"\"parse header and convert use_cols, if columns is specified in string. And if parse_date is required, add it into converters Args: stream : [description] Raises: SerializationError: [description] \"\"\" if self . header_line is None or self . _parsed_headers is not None : return try : line = stream . readlines ( self . header_line )[ - 1 ] cols = line . strip () . split ( self . sep ) self . _parsed_headers = cols use_cols = self . use_cols if use_cols is not None and isinstance ( use_cols [ 0 ], str ): self . use_cols = [ cols . index ( col ) for col in self . use_cols ] # convert keys of converters to int converters = { cols . index ( k ): v for k , v in self . converters . items ()} self . converters = converters if isinstance ( self . parse_date , str ): parse_date = cols . index ( self . parse_date ) if parse_date in self . converters . keys (): logger . debug ( \"specify duplicated converter in both parse_date and converters for col %s , use converters.\" , self . parse_date , ) else : # \u589e\u52a0parse_date\u5230converters self . converters [ parse_date ] = lambda x : ciso8601 . parse_datetime_as_naive ( x ) stream . seek ( 0 ) except ( IndexError , ValueError ): if line . strip () == \"\" : content = \"\" . join ( stream . readlines ()) . strip () if len ( content ) > 0 : raise SerializationError ( f \"specified heder line { self . header_line } is empty\" ) else : raise EmptyResult () else : raise SerializationError ( f \"bad header[ { self . header_line } ]: { line } \" ) def __call__ ( self , data : bytes ) -> np . ndarray : if self . encoding and isinstance ( data , bytes ): stream = io . StringIO ( data . decode ( self . encoding )) else : stream = io . StringIO ( data ) try : self . _parse_header_once ( stream ) except EmptyResult : return np . empty (( 0 ,), dtype = self . dtype ) arr = np . loadtxt ( stream . readlines (), delimiter = self . sep , skiprows = self . skip_rows , dtype = self . dtype , usecols = self . use_cols , converters = self . converters , encoding = self . encoding , ) # \u5982\u679c\u8fd4\u56de\u4ec5\u4e00\u6761\u8bb0\u5f55\uff0c\u6709\u65f6\u4f1a\u51fa\u73b0 shape == () if arr . shape == tuple (): arr = arr . reshape (( - 1 ,)) if self . sort_values is not None and arr . size > 1 : return np . sort ( arr , order = self . sort_values ) else : return arr __init__ ( self , dtype = 'float' , sort_values = None , use_cols = None , parse_date = '_time' , sep = ',' , encoding = 'utf-8' , skip_rows = 1 , header_line = 1 , comments = '#' , converters = None ) special \u00b6 construct a deserializer, which will convert a csv like multiline string/bytes array to a numpy array the data to be deserialized will be first split into array of fields, then use use_cols to select which fields to use, and re-order them by the order of use_cols. After that, the fields will be converted to numpy array and converted into dtype. by default dtype is float, which means the data will be converted to float. If you need to convert to a numpy structured array, then you can specify the dtype as a list of tuples, e.g. 1 dtype = [('col_1', 'datetime64[s]'), ('col_2', ' 1 : assert all ( [ isinstance ( x , int ) for x in self . converters . keys ()] ), \"converters must be a dict of column index to converter function, if there's no header\" self . 
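To complement the constructor walk-through above, here is a hedged sketch of `NumpyDeserializer` producing a structured array. The payload and the dtype tuples are illustrative; the library's own docstring shows the same `[('col', 'datetime64[s]'), ...]` pattern, which is truncated in the rendered page.

```python
from omicron.dal.influx.serialize import NumpyDeserializer

# Hypothetical annotated-CSV payload: header on line 1, one data row below.
data = (
    ",result,table,_time,code,close\r\n"
    ",_result,0,2019-01-01T09:31:00Z,000002.XSHE,5.15"
)

# Select three columns by header name, parse "_time" (the default parse_date
# column) as a datetime, and return a structured array sorted by "frame".
des = NumpyDeserializer(
    dtype=[("frame", "datetime64[s]"), ("code", "U11"), ("close", "f4")],
    use_cols=["_time", "code", "close"],
    sort_values="frame",
)
print(des(data))
```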
_parsed_headers = None","title":"Serialize"},{"location":"api/dal/serialize/#serializer-and-deserializer","text":"","title":"Serializer and Deserializer"},{"location":"api/dal/serialize/#dataframedeserializer","text":"Source code in omicron/dal/influx/serialize.py class DataframeDeserializer ( Serializer ): def __init__ ( self , sort_values : Union [ str , List [ str ]] = None , encoding : str = \"utf-8\" , names : List [ str ] = None , usecols : Union [ List [ int ], List [ str ]] = None , dtype : dict = None , time_col : Union [ int , str ] = None , sep : str = \",\" , header : Union [ int , List [ int ], str ] = \"infer\" , engine : str = None , infer_datetime_format = True , lineterminator : str = None , converters : dict = None , skipfooter = 0 , index_col : Union [ int , str , List [ int ], List [ str ], bool ] = None , skiprows : Union [ int , List [ int ], Callable ] = None , ** kwargs , ): \"\"\"constructor a deserializer which convert a csv-like bytes array to pandas.DataFrame the args are the same as pandas.read_csv. for details, please refer to the official doc: [pandas.read_csv](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.read_csv.html) for performance consideration, please specify the following args: - engine = 'c' or 'pyarrow' when possible. Be noticed that 'pyarrow' is the fastest (multi-threaded supported) but may be error-prone. Only use it when you have thoroughly tested. - specify dtype when possible use `usecols` to specify the columns to read, and `names` to specify the column names (i.e., rename the columns), otherwise, the column names will be inferred from the first line. when `names` is specified, it has to be as same length as actual columns of the data. If this causes column renaming, then you should always use column name specified in `names` to access the data (instead of which in `usecols`). Examples: >>> data = \",result,table,_time,code,name\\\\r\\\\n,_result,0,2019-01-01T09:31:00Z,000002.XSHE,\u56fd\u8054\u8bc1\u5238\" >>> des = DataframeDeserializer(names=[\"_\", \"result\", \"table\", \"frame\", \"code\", \"name\"], usecols=[\"frame\", \"code\", \"name\"]) >>> des(data) frame code name 0 2019-01-01T09:31:00Z 000002.XSHE \u56fd\u8054\u8bc1\u5238 Args: sort_values: sort the dataframe by the specified columns encoding: if the data is bytes, then encoding is required, due to pandas.read_csv only handle string array sep: the separator/delimiter of each fields header: the row number of the header, default is 'infer' names: the column names of the dataframe index_col: the column number or name of the index column usecols: the column name of the columns to use dtype: the dtype of the columns engine: the engine of the csv file, default is None converters: specify converter for columns. skiprows: the row number to skip skipfooter: the row number to skip at the end of the file time_col: the columns to parse as dates infer_datetime_format: whether to infer the datetime format lineterminator: the line terminator of the csv file, only valid when engine is 'c' kwargs: other arguments \"\"\" self . sort_values = sort_values self . encoding = encoding self . sep = sep self . header = header self . names = names self . index_col = index_col self . usecols = usecols self . dtype = dtype self . engine = engine self . converters = converters or {} self . skiprows = skiprows self . skipfooter = skipfooter self . infer_datetime_format = infer_datetime_format self . lineterminator = lineterminator self . kwargs = kwargs if names is not None : self . 
header = 0 if time_col is not None : self . converters [ time_col ] = lambda x : ciso8601 . parse_datetime_as_naive ( x ) def __call__ ( self , data : Union [ str , bytes ]) -> pd . DataFrame : if isinstance ( data , str ): # treat data as string stream = io . StringIO ( data ) else : stream = io . StringIO ( data . decode ( self . encoding )) df = pd . read_csv ( stream , sep = self . sep , header = self . header , names = self . names , index_col = self . index_col , usecols = self . usecols , dtype = self . dtype , engine = self . engine , converters = self . converters , skiprows = self . skiprows , skipfooter = self . skipfooter , infer_datetime_format = self . infer_datetime_format , lineterminator = self . lineterminator , ** self . kwargs , ) if self . usecols : df = df [ list ( self . usecols )] if self . sort_values is not None : return df . sort_values ( self . sort_values ) else : return df","title":"DataFrameDeserializer"},{"location":"api/dal/serialize/#omicron.dal.influx.serialize.DataframeDeserializer.__init__","text":"constructor a deserializer which convert a csv-like bytes array to pandas.DataFrame the args are the same as pandas.read_csv. for details, please refer to the official doc: pandas.read_csv for performance consideration, please specify the following args: - engine = 'c' or 'pyarrow' when possible. Be noticed that 'pyarrow' is the fastest (multi-threaded supported) but may be error-prone. Only use it when you have thoroughly tested. 1 - specify dtype when possible use usecols to specify the columns to read, and names to specify the column names (i.e., rename the columns), otherwise, the column names will be inferred from the first line. when names is specified, it has to be as same length as actual columns of the data. If this causes column renaming, then you should always use column name specified in names to access the data (instead of which in usecols ). Examples: >>> data = \",result,table,_time,code,name \\r\\n ,_result,0,2019-01-01T09:31:00Z,000002.XSHE,\u56fd\u8054\u8bc1\u5238\" >>> des = DataframeDeserializer ( names = [ \"_\" , \"result\" , \"table\" , \"frame\" , \"code\" , \"name\" ], usecols = [ \"frame\" , \"code\" , \"name\" ]) >>> des ( data ) frame code name 0 2019 - 01 - 01 T09 : 31 : 00 Z 000002. XSHE \u56fd\u8054\u8bc1\u5238 Parameters: Name Type Description Default sort_values Union[str, List[str]] sort the dataframe by the specified columns None encoding str if the data is bytes, then encoding is required, due to pandas.read_csv only handle string array 'utf-8' sep str the separator/delimiter of each fields ',' header Union[int, List[int], str] the row number of the header, default is 'infer' 'infer' names List[str] the column names of the dataframe None index_col Union[int, str, List[int], List[str], bool] the column number or name of the index column None usecols Union[List[int], List[str]] the column name of the columns to use None dtype dict the dtype of the columns None engine str the engine of the csv file, default is None None converters dict specify converter for columns. 
None skiprows Union[int, List[int], Callable] the row number to skip None skipfooter the row number to skip at the end of the file 0 time_col Union[int, str] the columns to parse as dates None infer_datetime_format whether to infer the datetime format True lineterminator str the line terminator of the csv file, only valid when engine is 'c' None kwargs other arguments {} Source code in omicron/dal/influx/serialize.py def __init__ ( self , sort_values : Union [ str , List [ str ]] = None , encoding : str = \"utf-8\" , names : List [ str ] = None , usecols : Union [ List [ int ], List [ str ]] = None , dtype : dict = None , time_col : Union [ int , str ] = None , sep : str = \",\" , header : Union [ int , List [ int ], str ] = \"infer\" , engine : str = None , infer_datetime_format = True , lineterminator : str = None , converters : dict = None , skipfooter = 0 , index_col : Union [ int , str , List [ int ], List [ str ], bool ] = None , skiprows : Union [ int , List [ int ], Callable ] = None , ** kwargs , ): \"\"\"constructor a deserializer which convert a csv-like bytes array to pandas.DataFrame the args are the same as pandas.read_csv. for details, please refer to the official doc: [pandas.read_csv](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.read_csv.html) for performance consideration, please specify the following args: - engine = 'c' or 'pyarrow' when possible. Be noticed that 'pyarrow' is the fastest (multi-threaded supported) but may be error-prone. Only use it when you have thoroughly tested. - specify dtype when possible use `usecols` to specify the columns to read, and `names` to specify the column names (i.e., rename the columns), otherwise, the column names will be inferred from the first line. when `names` is specified, it has to be as same length as actual columns of the data. If this causes column renaming, then you should always use column name specified in `names` to access the data (instead of which in `usecols`). Examples: >>> data = \",result,table,_time,code,name\\\\r\\\\n,_result,0,2019-01-01T09:31:00Z,000002.XSHE,\u56fd\u8054\u8bc1\u5238\" >>> des = DataframeDeserializer(names=[\"_\", \"result\", \"table\", \"frame\", \"code\", \"name\"], usecols=[\"frame\", \"code\", \"name\"]) >>> des(data) frame code name 0 2019-01-01T09:31:00Z 000002.XSHE \u56fd\u8054\u8bc1\u5238 Args: sort_values: sort the dataframe by the specified columns encoding: if the data is bytes, then encoding is required, due to pandas.read_csv only handle string array sep: the separator/delimiter of each fields header: the row number of the header, default is 'infer' names: the column names of the dataframe index_col: the column number or name of the index column usecols: the column name of the columns to use dtype: the dtype of the columns engine: the engine of the csv file, default is None converters: specify converter for columns. skiprows: the row number to skip skipfooter: the row number to skip at the end of the file time_col: the columns to parse as dates infer_datetime_format: whether to infer the datetime format lineterminator: the line terminator of the csv file, only valid when engine is 'c' kwargs: other arguments \"\"\" self . sort_values = sort_values self . encoding = encoding self . sep = sep self . header = header self . names = names self . index_col = index_col self . usecols = usecols self . dtype = dtype self . engine = engine self . converters = converters or {} self . skiprows = skiprows self . skipfooter = skipfooter self . 
infer_datetime_format = infer_datetime_format self . lineterminator = lineterminator self . kwargs = kwargs if names is not None : self . header = 0 if time_col is not None : self . converters [ time_col ] = lambda x : ciso8601 . parse_datetime_as_naive ( x )","title":"__init__()"},{"location":"api/dal/serialize/#numpydeserializer","text":"Source code in omicron/dal/influx/serialize.py class NumpyDeserializer ( Serializer ): def __init__ ( self , dtype : List [ tuple ] = \"float\" , sort_values : Union [ str , List [ str ]] = None , use_cols : Union [ List [ str ], List [ int ]] = None , parse_date : Union [ int , str ] = \"_time\" , sep : str = \",\" , encoding : str = \"utf-8\" , skip_rows : Union [ int , List [ int ]] = 1 , header_line : int = 1 , comments : str = \"#\" , converters : Mapping [ int , Callable ] = None , ): \"\"\"construct a deserializer, which will convert a csv like multiline string/bytes array to a numpy array the data to be deserialized will be first split into array of fields, then use use_cols to select which fields to use, and re-order them by the order of use_cols. After that, the fields will be converted to numpy array and converted into dtype. by default dtype is float, which means the data will be converted to float. If you need to convert to a numpy structured array, then you can specify the dtype as a list of tuples, e.g. ``` dtype = [('col_1', 'datetime64[s]'), ('col_2', ' 1 : assert all ( [ isinstance ( x , int ) for x in self . converters . keys ()] ), \"converters must be a dict of column index to converter function, if there's no header\" self . _parsed_headers = None def _parse_header_once ( self , stream ): \"\"\"parse header and convert use_cols, if columns is specified in string. And if parse_date is required, add it into converters Args: stream : [description] Raises: SerializationError: [description] \"\"\" if self . header_line is None or self . _parsed_headers is not None : return try : line = stream . readlines ( self . header_line )[ - 1 ] cols = line . strip () . split ( self . sep ) self . _parsed_headers = cols use_cols = self . use_cols if use_cols is not None and isinstance ( use_cols [ 0 ], str ): self . use_cols = [ cols . index ( col ) for col in self . use_cols ] # convert keys of converters to int converters = { cols . index ( k ): v for k , v in self . converters . items ()} self . converters = converters if isinstance ( self . parse_date , str ): parse_date = cols . index ( self . parse_date ) if parse_date in self . converters . keys (): logger . debug ( \"specify duplicated converter in both parse_date and converters for col %s , use converters.\" , self . parse_date , ) else : # \u589e\u52a0parse_date\u5230converters self . converters [ parse_date ] = lambda x : ciso8601 . parse_datetime_as_naive ( x ) stream . seek ( 0 ) except ( IndexError , ValueError ): if line . strip () == \"\" : content = \"\" . join ( stream . readlines ()) . strip () if len ( content ) > 0 : raise SerializationError ( f \"specified heder line { self . header_line } is empty\" ) else : raise EmptyResult () else : raise SerializationError ( f \"bad header[ { self . header_line } ]: { line } \" ) def __call__ ( self , data : bytes ) -> np . ndarray : if self . encoding and isinstance ( data , bytes ): stream = io . StringIO ( data . decode ( self . encoding )) else : stream = io . StringIO ( data ) try : self . _parse_header_once ( stream ) except EmptyResult : return np . empty (( 0 ,), dtype = self . dtype ) arr = np . loadtxt ( stream . 
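# A minimal usage sketch for NumpyDeserializer with a structured dtype, as
# described in the docstring above; the field names, dtypes and payload are
# illustrative only.
des = NumpyDeserializer(
    dtype=[("frame", "datetime64[s]"), ("code", "U12"), ("close", "f8")],
    use_cols=["_time", "code", "close"],
    parse_date="_time",            # converted via ciso8601
    sort_values="frame",           # sort by the structured field name
)
payload = b"_time,code,close\n2019-01-02T09:31:00,000002.XSHE,10.2\n"
bars = des(payload)                # structured ndarray with fields frame/code/close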
readlines (), delimiter = self . sep , skiprows = self . skip_rows , dtype = self . dtype , usecols = self . use_cols , converters = self . converters , encoding = self . encoding , ) # \u5982\u679c\u8fd4\u56de\u4ec5\u4e00\u6761\u8bb0\u5f55\uff0c\u6709\u65f6\u4f1a\u51fa\u73b0 shape == () if arr . shape == tuple (): arr = arr . reshape (( - 1 ,)) if self . sort_values is not None and arr . size > 1 : return np . sort ( arr , order = self . sort_values ) else : return arr","title":"NumpyDeserializer"},{"location":"api/dal/serialize/#omicron.dal.influx.serialize.NumpyDeserializer.__init__","text":"construct a deserializer, which will convert a csv like multiline string/bytes array to a numpy array the data to be deserialized will be first split into array of fields, then use use_cols to select which fields to use, and re-order them by the order of use_cols. After that, the fields will be converted to numpy array and converted into dtype. by default dtype is float, which means the data will be converted to float. If you need to convert to a numpy structured array, then you can specify the dtype as a list of tuples, e.g. 1 dtype = [('col_1', 'datetime64[s]'), ('col_2', ' 1 : assert all ( [ isinstance ( x , int ) for x in self . converters . keys ()] ), \"converters must be a dict of column index to converter function, if there's no header\" self . _parsed_headers = None","title":"__init__()"},{"location":"api/plotting/candlestick/","text":"\u7ed8\u5236K\u7ebf\u56fe\u3002 \u7528\u6cd5\u793a\u4f8b \u00b6 \u6ce8\u610f\u793a\u4f8b\u9700\u8981\u5728notebook\u4e2d\u8fd0\u884c\uff0c\u5426\u5219\u65e0\u6cd5\u751f\u6210\u56fe\u3002 1 2 3 4 5 from omicron.plotting.candlestick import Candlestick bars = await Stock . get_bars ( \"000001.XSHE\" , 120 , FrameType . DAY ) cs = Candlestick ( bars ) cs . plot () \u8fd9\u5c06\u751f\u6210\u4e0b\u56fe\uff1a \u9ed8\u8ba4\u5730\uff0c\u5c06\u663e\u793a\u6210\u4ea4\u91cf\u548cRSI\u6307\u6807\u4e24\u4e2a\u526f\u56fe\u3002\u53ef\u4ee5\u901a\u8fc7\u4ee5\u4e0b\u65b9\u5f0f\u6765\u5b9a\u5236\uff1a 1 2 3 4 5 cs = Candlestick ( bars , show_volume = True , show_rsi = True , show_peaks = False } cs . plot () \u589e\u52a0\u6807\u8bb0 \u00b6 1 2 3 4 5 6 7 8 9 10 11 from omicron.plotting.candlestick import Candlestick bars = await Stock . get_bars ( \"000001.XSHE\" , 120 , FrameType . DAY ) cs = Candlestick ( bars , show_volume = True , show_rsi = False , show_peaks = True ) cs . add_marks ([ 20 , 50 ]) cs . plot () \u8fd9\u5c06\u5728k\u7ebf\u4e0a\u663e\u793a\u4e24\u4e2a\u52a0\u53f7\uff1a \u663e\u793a\u5e03\u6797\u5e26 \u00b6 1 2 3 4 5 6 7 8 9 10 11 from omicron.plotting.candlestick import Candlestick bars = await Stock . get_bars ( \"000001.XSHE\" , 120 , FrameType . DAY ) cs = Candlestick ( bars , show_volume = True , show_rsi = False , show_peaks = True ) cs . add_indicator ( \"bbands\" , 20 ) cs . plot () \u663e\u793a\u5e73\u53f0 \u00b6 1 2 3 4 5 6 7 8 9 10 11 12 from omicron.plotting.candlestick import Candlestick bars = await Stock . get_bars ( \"000001.XSHE\" , 120 , FrameType . DAY ) cs = Candlestick ( bars , show_volume = True , show_rsi = False , show_peaks = True ) cs . mark_bbox () cs . plot () Candlestick \u00b6 Source code in omicron/plotting/candlestick.py class Candlestick : RED = \"#FF4136\" GREEN = \"#3DAA70\" TRANSPARENT = \"rgba(0,0,0,0)\" LIGHT_GRAY = \"rgba(0, 0, 0, 0.1)\" MA_COLORS = { 5 : \"#1432F5\" , 10 : \"#EB52F7\" , 20 : \"#C0C0C0\" , 30 : \"#882111\" , 60 : \"#5E8E28\" , 120 : \"#4294F7\" , 250 : \"#F09937\" , } def __init__ ( self , bars : np . 
ndarray , ma_groups : List [ int ] = None , title : str = None , show_volume = True , show_rsi = True , show_peaks = False , width = None , height = None , ** kwargs , ): \"\"\"\u6784\u9020\u51fd\u6570 Args: bars: \u884c\u60c5\u6570\u636e ma_groups: \u5747\u7ebf\u7ec4\u53c2\u6570\u3002\u6bd4\u5982[5, 10, 20]\u8868\u660e\u5411k\u7ebf\u56fe\u4e2d\u6dfb\u52a05, 10, 20\u65e5\u5747\u7ebf\u3002\u5982\u679c\u4e0d\u63d0\u4f9b\uff0c\u5c06\u4ece\u6570\u7ec4[5, 10, 20, 30, 60, 120, 250]\u4e2d\u53d6\u76f4\u5230\u4e0e`len(bars) - 5`\u5339\u914d\u7684\u53c2\u6570\u4e3a\u6b62\u3002\u6bd4\u5982bars\u957f\u5ea6\u4e3a30\uff0c\u5219\u5c06\u53d6[5, 10, 20]\u6765\u7ed8\u5236\u5747\u7ebf\u3002 title: k\u7ebf\u56fe\u7684\u6807\u9898 show_volume: \u662f\u5426\u663e\u793a\u6210\u4ea4\u91cf\u56fe show_rsi: \u662f\u5426\u663e\u793aRSI\u56fe\u3002\u7f3a\u7701\u663e\u793a\u53c2\u6570\u4e3a6\u7684RSI\u56fe\u3002 show_peaks: \u662f\u5426\u6807\u8bb0\u68c0\u6d4b\u51fa\u6765\u7684\u5cf0\u8ddf\u8c37\u3002 width: the width in 'px' units of the figure height: the height in 'px' units of the figure Keyword Args: rsi_win int: default is 6 \"\"\" self . title = title self . bars = bars self . width = width self . height = height # traces for main area self . main_traces = {} # traces for indicator area self . ind_traces = {} self . ticks = self . _format_tick ( bars [ \"frame\" ]) self . _bar_close = array_math_round ( bars [ \"close\" ], 2 ) . astype ( np . float64 ) # for every candlestick, it must contain a candlestick plot cs = go . Candlestick ( x = self . ticks , open = bars [ \"open\" ], high = bars [ \"high\" ], low = bars [ \"low\" ], close = self . _bar_close , line = dict ({ \"width\" : 1 }), name = \"K\u7ebf\" , ** kwargs , ) # Set line and fill colors cs . increasing . fillcolor = \"rgba(255,255,255,0.9)\" cs . increasing . line . color = self . RED cs . decreasing . fillcolor = self . GREEN cs . decreasing . line . color = self . GREEN self . main_traces [ \"ohlc\" ] = cs if show_volume : self . add_indicator ( \"volume\" ) if show_peaks : self . add_main_trace ( \"peaks\" ) if show_rsi : self . add_indicator ( \"rsi\" , win = kwargs . get ( \"rsi_win\" , 6 )) # \u589e\u52a0\u5747\u7ebf if ma_groups is None : nbars = len ( bars ) if nbars < 9 : ma_groups = [] else : groups = np . array ([ 5 , 10 , 20 , 30 , 60 , 120 , 250 ]) idx = max ( np . argwhere ( groups < ( nbars - 5 ))) . item () + 1 ma_groups = groups [: idx ] for win in ma_groups : name = f \"ma { win } \" if win > len ( bars ): continue ma = moving_average ( self . _bar_close , win ) line = go . Scatter ( y = ma , x = self . ticks , name = name , line = dict ( width = 1 , color = self . MA_COLORS . get ( win )), ) self . main_traces [ name ] = line @property def figure ( self ): \"\"\"\u8fd4\u56de\u4e00\u4e2afigure\u5bf9\u8c61\"\"\" rows = len ( self . ind_traces ) + 1 specs = [[{ \"secondary_y\" : False }]] * rows specs [ 0 ][ 0 ][ \"secondary_y\" ] = True row_heights = [ 0.7 , * ([ 0.3 / ( rows - 1 )] * ( rows - 1 ))] print ( row_heights ) cols = 1 fig = make_subplots ( rows = rows , cols = cols , shared_xaxes = True , vertical_spacing = 0.1 , subplot_titles = ( self . title , * self . ind_traces . keys ()), row_heights = row_heights , specs = specs , ) for _ , trace in self . main_traces . items (): fig . add_trace ( trace , row = 1 , col = 1 ) for i , ( _ , trace ) in enumerate ( self . ind_traces . items ()): fig . add_trace ( trace , row = i + 2 , col = 1 ) ymin = np . min ( self . bars [ \"low\" ]) ymax = np . max ( self . 
bars [ \"high\" ]) ylim = [ ymin * 0.95 , ymax * 1.05 ] # \u663e\u793a\u5341\u5b57\u5149\u6807 fig . update_xaxes ( showgrid = False , showspikes = True , spikemode = \"across\" , spikesnap = \"cursor\" , spikecolor = \"grey\" , spikedash = \"solid\" , spikethickness = 1 , ) fig . update_yaxes ( showspikes = True , spikemode = \"across\" , spikesnap = \"cursor\" , spikedash = \"solid\" , spikecolor = \"grey\" , spikethickness = 1 , showgrid = True , gridcolor = self . LIGHT_GRAY , ) fig . update_xaxes ( nticks = len ( self . bars ) // 10 , ticklen = 10 , ticks = \"outside\" , minor = dict ( nticks = 5 , ticklen = 5 , ticks = \"outside\" ), row = rows , col = 1 , ) # \u8bbe\u7f6eK\u7ebf\u663e\u793a\u533a\u57df if self . width : win_size = int ( self . width // 10 ) else : win_size = 120 fig . update_xaxes ( type = \"category\" , range = [ len ( self . bars ) - win_size , len ( self . bars ) - 1 ] ) fig . update_layout ( yaxis = dict ( range = ylim ), hovermode = \"x unified\" , plot_bgcolor = self . TRANSPARENT , xaxis_rangeslider_visible = False , ) if self . width : fig . update_layout ( width = self . width ) if self . height : fig . update_layout ( height = self . height ) return fig def _format_tick ( self , tm : np . array ) -> NDArray : if tm . item ( 0 ) . hour == 0 : # assume it's date return np . array ( [ f \" { x . item () . year : 02 } - { x . item () . month : 02 } - { x . item () . day : 02 } \" for x in tm ] ) else : return np . array ( [ f \" { x . item () . month : 02 } - { x . item () . day : 02 } { x . item () . hour : 02 } : { x . item () . minute : 02 } \" for x in tm ] ) def _remove_ma ( self ): traces = {} for name in self . main_traces : if not name . startswith ( \"ma\" ): traces [ name ] = self . main_traces [ name ] self . main_traces = traces def add_main_trace ( self , trace_name : str , ** kwargs ): \"\"\"add trace to main plot \u652f\u6301\u7684\u56fe\u4f8b\u7c7b\u522b\u6709peaks, bbox\uff08bounding-box), bt(\u56de\u6d4b), support_line, resist_line Args: trace_name : \u56fe\u4f8b\u540d\u79f0 **kwargs : \u5176\u4ed6\u53c2\u6570 \"\"\" if trace_name == \"peaks\" : self . mark_peaks_and_valleys ( kwargs . get ( \"up_thres\" , 0.03 ), kwargs . get ( \"down_thres\" , - 0.03 ) ) # \u6807\u6ce8\u77e9\u5f62\u6846 elif trace_name == \"bbox\" : self . add_bounding_box ( kwargs . get ( \"boxes\" )) # \u56de\u6d4b\u7ed3\u679c elif trace_name == \"bt\" : self . add_backtest_result ( kwargs . get ( \"bt\" )) # \u589e\u52a0\u76f4\u7ebf elif trace_name == \"support_line\" : self . add_line ( \"\u652f\u6491\u7ebf\" , kwargs . get ( \"x\" ), kwargs . get ( \"y\" )) elif trace_name == \"resist_line\" : self . add_line ( \"\u538b\u529b\u7ebf\" , kwargs . get ( \"x\" ), kwargs . get ( \"y\" )) def add_line ( self , trace_name : str , x : List [ int ], y : List [ float ]): \"\"\"\u5728k\u7ebf\u56fe\u4e0a\u589e\u52a0\u4ee5`x`,`y`\u8868\u793a\u7684\u4e00\u6761\u76f4\u7ebf Args: trace_name : \u56fe\u4f8b\u540d\u79f0 x : x\u8f74\u5750\u6807\uff0c\u6240\u6709\u7684x\u503c\u90fd\u5fc5\u987b\u5c5e\u4e8e[0, len(self.bars)] y : y\u503c \"\"\" line = go . Scatter ( x = self . ticks [ x ], y = y , mode = \"lines\" , name = trace_name ) self . 
main_traces [ trace_name ] = line def mark_support_resist_lines ( self , upthres : float = None , downthres : float = None , use_close = True , win = 60 ): \"\"\"\u5728K\u7ebf\u56fe\u4e0a\u6807\u6ce8\u652f\u6491\u7ebf\u548c\u538b\u529b\u7ebf \u5728`win`\u4e2ak\u7ebf\u5185\uff0c\u627e\u51fa\u6240\u6709\u7684\u5c40\u90e8\u5cf0\u8c37\u70b9\uff0c\u5e76\u4ee5\u6700\u9ad8\u7684\u4e24\u4e2a\u5cf0\u8fde\u7ebf\u751f\u6210\u538b\u529b\u7ebf\uff0c\u4ee5\u6700\u4f4e\u7684\u4e24\u4e2a\u8c37\u8fde\u7ebf\u751f\u6210\u652f\u6491\u7ebf\u3002 Args: upthres : \u7528\u6765\u68c0\u6d4b\u5cf0\u8c37\u65f6\u4f7f\u7528\u7684\u9608\u503c\uff0c\u53c2\u89c1`omicron.talib.morph.peaks_and_valleys` downthres : \u7528\u6765\u68c0\u6d4b\u5cf0\u8c37\u65f6\u4f7f\u7528\u7684\u9608\u503c\uff0c\u53c2\u89c1`omicron.talib.morph.peaks_and_valleys`. use_close : \u662f\u5426\u4f7f\u7528\u6536\u76d8\u4ef7\u6765\u8fdb\u884c\u68c0\u6d4b\u3002\u5982\u679c\u4e3aFalse\uff0c\u5219\u4f7f\u7528high\u6765\u68c0\u6d4b\u538b\u529b\u7ebf\uff0c\u4f7f\u7528low\u6765\u68c0\u6d4b\u652f\u6491\u7ebf. win : \u68c0\u6d4b\u5c40\u90e8\u9ad8\u4f4e\u70b9\u7684\u7a97\u53e3. \"\"\" bars = self . bars [ - win :] clipped = len ( self . bars ) - win if use_close : support , resist , x_start = support_resist_lines ( self . _bar_close , upthres , downthres ) x = np . arange ( len ( bars ))[ x_start :] self . add_main_trace ( \"support_line\" , x = x + clipped , y = support ( x )) self . add_main_trace ( \"resist_line\" , x = x + clipped , y = resist ( x )) else : # \u4f7f\u7528\"high\"\u548c\"low\" bars = self . bars [ - win :] support , _ , x_start = support_resist_lines ( bars [ \"low\" ], upthres , downthres ) x = np . arange ( len ( bars ))[ x_start :] self . add_main_trace ( \"support_line\" , x = x + clipped , y = support ( x )) _ , resist , x_start = support_resist_lines ( bars [ \"high\" ], upthres , downthres ) x = np . arange ( len ( bars ))[ x_start :] self . add_main_trace ( \"resist_line\" , x = x + clipped , y = resist ( x )) def mark_bbox ( self , min_size : int = 20 ): \"\"\"\u5728k\u7ebf\u56fe\u4e0a\u68c0\u6d4b\u5e76\u6807\u6ce8\u77e9\u5f62\u6846 Args: min_size : \u77e9\u5f62\u6846\u7684\u6700\u5c0f\u957f\u5ea6 \"\"\" boxes = plateaus ( self . _bar_close , min_size ) self . add_main_trace ( \"bbox\" , boxes = boxes ) def mark_backtest_result ( self , result : dict ): \"\"\"\u6807\u8bb0\u4e70\u5356\u70b9\u548c\u56de\u6d4b\u6570\u636e TODO: \u6b64\u65b9\u6cd5\u53ef\u80fd\u672a\u4e0ebacktest\u8fd4\u56de\u503c\u540c\u6b65\u3002\u6b64\u5916\uff0c\u5728portofolio\u56de\u6d4b\u4e2d\uff0c\u4e0d\u53ef\u80fd\u5728k\u7ebf\u56fe\u4e2d\u4f7f\u7528\u6b64\u65b9\u6cd5\u3002 Args: points : \u4e70\u5356\u70b9\u7684\u5750\u6807\u3002 \"\"\" trades = result . get ( \"trades\" ) assets = result . get ( \"assets\" ) x , y , labels = [], [], [] hover = [] labels_color = defaultdict ( list ) for trade in trades : trade_date = arrow . get ( trade [ \"time\" ]) . date () asset = assets . get ( trade_date ) security = trade [ \"security\" ] price = trade [ \"price\" ] volume = trade [ \"volume\" ] side = trade [ \"order_side\" ] x . append ( self . _format_tick ( trade_date )) bar = self . bars [ self . bars [ \"frame\" ] == trade_date ] if side == \"\u4e70\u5165\" : hover . append ( f \"\u603b\u8d44\u4ea7: { asset }

{ side } : { security }
\u4e70\u5165\u4ef7: { price }
\u80a1\u6570: { volume } \" ) y . append ( bar [ \"high\" ][ 0 ] * 1.1 ) labels . append ( \"B\" ) labels_color [ \"color\" ] . append ( self . RED ) else : y . append ( bar [ \"low\" ][ 0 ] * 0.99 ) hover . append ( f \"\u603b\u8d44\u4ea7: { asset }

{ side } : { security }
\u5356\u51fa\u4ef7: { price }
\u80a1\u6570: { volume } \" ) labels . append ( \"S\" ) labels_color [ \"color\" ] . append ( self . GREEN ) labels_color . append ( self . GREEN ) # txt.append(f'{side}:{security}
\u5356\u51fa\u4ef7:{price}
\u80a1\u6570:{volume}') trace = go . Scatter ( x = x , y = y , mode = \"text\" , text = labels , name = \"backtest\" , hovertext = hover , textfont = labels_color , ) self . main_traces [ \"bs\" ] = trace def mark_peaks_and_valleys ( self , up_thres : Optional [ float ] = None , down_thres : Optional [ float ] = None ): \"\"\"\u5728K\u7ebf\u56fe\u4e0a\u6807\u6ce8\u5cf0\u8c37\u70b9 Args: up_thres : \u7528\u6765\u68c0\u6d4b\u5cf0\u8c37\u65f6\u4f7f\u7528\u7684\u9608\u503c\uff0c\u53c2\u89c1[omicron.talib.morph.peaks_and_valleys][] down_thres : \u7528\u6765\u68c0\u6d4b\u5cf0\u8c37\u65f6\u4f7f\u7528\u7684\u9608\u503c\uff0c\u53c2\u89c1[omicron.talib.morph.peaks_and_valleys][] \"\"\" bars = self . bars flags = peaks_and_valleys ( self . _bar_close , up_thres , down_thres ) # \u79fb\u9664\u9996\u5c3e\u7684\u9876\u5e95\u6807\u8bb0\uff0c\u4e00\u822c\u60c5\u51b5\u4e0b\u5b83\u4eec\u90fd\u4e0d\u662f\u771f\u6b63\u7684\u9876\u548c\u5e95\u3002 flags [ 0 ] = 0 flags [ - 1 ] = 0 marker_margin = ( max ( bars [ \"high\" ]) - min ( bars [ \"low\" ])) * 0.05 ticks_up = self . ticks [ flags == 1 ] y_up = bars [ \"high\" ][ flags == 1 ] + marker_margin ticks_down = self . ticks [ flags == - 1 ] y_down = bars [ \"low\" ][ flags == - 1 ] - marker_margin trace = go . Scatter ( mode = \"markers\" , x = ticks_up , y = y_up , marker_symbol = \"triangle-down\" , name = \"\u5cf0\" ) self . main_traces [ \"peaks\" ] = trace trace = go . Scatter ( mode = \"markers\" , x = ticks_down , y = y_down , marker_symbol = \"triangle-up\" , name = \"\u8c37\" , ) self . main_traces [ \"valleys\" ] = trace def add_bounding_box ( self , boxes : List [ Tuple ]): \"\"\"bbox\u662f\u6807\u8bb0\u5728k\u7ebf\u56fe\u4e0a\u67d0\u4e2a\u533a\u95f4\u5185\u7684\u77e9\u5f62\u6846\uff0c\u5b83\u4ee5\u8be5\u533a\u95f4\u6700\u9ad8\u4ef7\u548c\u6700\u4f4e\u4ef7\u4e3a\u4e0a\u4e0b\u8fb9\u3002 Args: boxes: \u6bcf\u4e2a\u5143\u7d20(start, width)\u8868\u793a\u5404\u4e2abbox\u7684\u8d77\u70b9\u548c\u5bbd\u5ea6\u3002 \"\"\" for j , box in enumerate ( boxes ): x , y = [], [] i , width = box if len ( x ): x . append ( None ) y . append ( None ) group = self . bars [ i : i + width ] mean = np . mean ( group [ \"close\" ]) std = 2 * np . std ( group [ \"close\" ]) # \u843d\u5728\u4e24\u4e2a\u6807\u51c6\u5dee\u4ee5\u5185\u7684\u5b9e\u4f53\u6700\u4e0a\u65b9\u548c\u6700\u4e0b\u65b9\u503c hc = np . max ( group [ group [ \"close\" ] < mean + std ][ \"close\" ]) lc = np . min ( group [ group [ \"close\" ] > mean - std ][ \"close\" ]) ho = np . max ( group [ group [ \"open\" ] < mean + std ][ \"open\" ]) lo = np . min ( group [ group [ \"open\" ] > mean - std ][ \"open\" ]) h = max ( hc , ho ) low = min ( lo , lc ) x . extend ( self . ticks [[ i , i + width - 1 , i + width - 1 , i , i ]]) y . extend (( h , h , low , low , h )) hover = f \"\u5bbd\u5ea6: { width }
\u632f\u5e45: { h / low - 1 : .2% } \" trace = go . Scatter ( x = x , y = y , fill = \"toself\" , name = f \"\u5e73\u53f0\u6574\u7406 { j } \" , text = hover ) self . main_traces [ f \"bbox- { j } \" ] = trace def add_indicator ( self , indicator : str , ** kwargs ): \"\"\"\u5411k\u7ebf\u56fe\u4e2d\u589e\u52a0\u6280\u672f\u6307\u6807 Args: indicator: \u5f53\u524d\u652f\u6301\u503c\u6709'volume', 'rsi', 'bbands' kwargs: \u8ba1\u7b97\u67d0\u4e2aindicator\u65f6\uff0c\u9700\u8981\u7684\u53c2\u6570\u3002\u6bd4\u5982\u8ba1\u7b97bbands\u65f6\uff0c\u9700\u8981\u4f20\u5165\u5747\u7ebf\u7684window \"\"\" if indicator == \"volume\" : colors = np . repeat ( self . RED , len ( self . bars )) colors [ self . bars [ \"close\" ] <= self . bars [ \"open\" ]] = self . GREEN trace = go . Bar ( x = self . ticks , y = self . bars [ \"volume\" ], showlegend = False , marker = { \"color\" : colors }, ) elif indicator == \"rsi\" : win = kwargs . get ( \"win\" ) rsi = talib . RSI ( self . _bar_close , win ) # type: ignore trace = go . Scatter ( x = self . ticks , y = rsi , showlegend = False ) elif indicator == \"bbands\" : self . _remove_ma () win = kwargs . get ( \"win\" ) for name , ind in zip ( [ \"bbands-high\" , \"bbands-mean\" , \"bbands-low\" ], talib . BBANDS ( self . _bar_close , win ), # type: ignore ): trace = go . Scatter ( x = self . ticks , y = ind , showlegend = True , name = name ) self . main_traces [ name ] = trace return else : raise ValueError ( f \" { indicator } not supported\" ) self . ind_traces [ indicator ] = trace def add_marks ( self , x : List [ int ], y : List [ float ], name : str , marker : str = \"cross\" , color : Optional [ str ] = None , ): \"\"\"\u5411k\u7ebf\u56fe\u4e2d\u589e\u52a0\u6807\u8bb0\u70b9\"\"\" trace = go . Scatter ( x = self . ticks [ x ], y = y , mode = \"markers\" , marker_symbol = marker , marker_color = color , name = name , ) self . main_traces [ name ] = trace def plot ( self ): \"\"\"\u7ed8\u5236\u56fe\u8868\"\"\" fig = self . figure fig . show () figure property readonly \u00b6 \u8fd4\u56de\u4e00\u4e2afigure\u5bf9\u8c61 __init__ ( self , bars , ma_groups = None , title = None , show_volume = True , show_rsi = True , show_peaks = False , width = None , height = None , ** kwargs ) special \u00b6 \u6784\u9020\u51fd\u6570 Parameters: Name Type Description Default bars ndarray \u884c\u60c5\u6570\u636e required ma_groups List[int] \u5747\u7ebf\u7ec4\u53c2\u6570\u3002\u6bd4\u5982[5, 10, 20]\u8868\u660e\u5411k\u7ebf\u56fe\u4e2d\u6dfb\u52a05, 10, 20\u65e5\u5747\u7ebf\u3002\u5982\u679c\u4e0d\u63d0\u4f9b\uff0c\u5c06\u4ece\u6570\u7ec4[5, 10, 20, 30, 60, 120, 250]\u4e2d\u53d6\u76f4\u5230\u4e0e len(bars) - 5 \u5339\u914d\u7684\u53c2\u6570\u4e3a\u6b62\u3002\u6bd4\u5982bars\u957f\u5ea6\u4e3a30\uff0c\u5219\u5c06\u53d6[5, 10, 20]\u6765\u7ed8\u5236\u5747\u7ebf\u3002 None title str k\u7ebf\u56fe\u7684\u6807\u9898 None show_volume \u662f\u5426\u663e\u793a\u6210\u4ea4\u91cf\u56fe True show_rsi \u662f\u5426\u663e\u793aRSI\u56fe\u3002\u7f3a\u7701\u663e\u793a\u53c2\u6570\u4e3a6\u7684RSI\u56fe\u3002 True show_peaks \u662f\u5426\u6807\u8bb0\u68c0\u6d4b\u51fa\u6765\u7684\u5cf0\u8ddf\u8c37\u3002 False width the width in 'px' units of the figure None height the height in 'px' units of the figure None Keyword arguments: Name Type Description rsi_win int default is 6 Source code in omicron/plotting/candlestick.py def __init__ ( self , bars : np . 
ndarray , ma_groups : List [ int ] = None , title : str = None , show_volume = True , show_rsi = True , show_peaks = False , width = None , height = None , ** kwargs , ): \"\"\"\u6784\u9020\u51fd\u6570 Args: bars: \u884c\u60c5\u6570\u636e ma_groups: \u5747\u7ebf\u7ec4\u53c2\u6570\u3002\u6bd4\u5982[5, 10, 20]\u8868\u660e\u5411k\u7ebf\u56fe\u4e2d\u6dfb\u52a05, 10, 20\u65e5\u5747\u7ebf\u3002\u5982\u679c\u4e0d\u63d0\u4f9b\uff0c\u5c06\u4ece\u6570\u7ec4[5, 10, 20, 30, 60, 120, 250]\u4e2d\u53d6\u76f4\u5230\u4e0e`len(bars) - 5`\u5339\u914d\u7684\u53c2\u6570\u4e3a\u6b62\u3002\u6bd4\u5982bars\u957f\u5ea6\u4e3a30\uff0c\u5219\u5c06\u53d6[5, 10, 20]\u6765\u7ed8\u5236\u5747\u7ebf\u3002 title: k\u7ebf\u56fe\u7684\u6807\u9898 show_volume: \u662f\u5426\u663e\u793a\u6210\u4ea4\u91cf\u56fe show_rsi: \u662f\u5426\u663e\u793aRSI\u56fe\u3002\u7f3a\u7701\u663e\u793a\u53c2\u6570\u4e3a6\u7684RSI\u56fe\u3002 show_peaks: \u662f\u5426\u6807\u8bb0\u68c0\u6d4b\u51fa\u6765\u7684\u5cf0\u8ddf\u8c37\u3002 width: the width in 'px' units of the figure height: the height in 'px' units of the figure Keyword Args: rsi_win int: default is 6 \"\"\" self . title = title self . bars = bars self . width = width self . height = height # traces for main area self . main_traces = {} # traces for indicator area self . ind_traces = {} self . ticks = self . _format_tick ( bars [ \"frame\" ]) self . _bar_close = array_math_round ( bars [ \"close\" ], 2 ) . astype ( np . float64 ) # for every candlestick, it must contain a candlestick plot cs = go . Candlestick ( x = self . ticks , open = bars [ \"open\" ], high = bars [ \"high\" ], low = bars [ \"low\" ], close = self . _bar_close , line = dict ({ \"width\" : 1 }), name = \"K\u7ebf\" , ** kwargs , ) # Set line and fill colors cs . increasing . fillcolor = \"rgba(255,255,255,0.9)\" cs . increasing . line . color = self . RED cs . decreasing . fillcolor = self . GREEN cs . decreasing . line . color = self . GREEN self . main_traces [ \"ohlc\" ] = cs if show_volume : self . add_indicator ( \"volume\" ) if show_peaks : self . add_main_trace ( \"peaks\" ) if show_rsi : self . add_indicator ( \"rsi\" , win = kwargs . get ( \"rsi_win\" , 6 )) # \u589e\u52a0\u5747\u7ebf if ma_groups is None : nbars = len ( bars ) if nbars < 9 : ma_groups = [] else : groups = np . array ([ 5 , 10 , 20 , 30 , 60 , 120 , 250 ]) idx = max ( np . argwhere ( groups < ( nbars - 5 ))) . item () + 1 ma_groups = groups [: idx ] for win in ma_groups : name = f \"ma { win } \" if win > len ( bars ): continue ma = moving_average ( self . _bar_close , win ) line = go . Scatter ( y = ma , x = self . ticks , name = name , line = dict ( width = 1 , color = self . MA_COLORS . get ( win )), ) self . 
main_traces [ name ] = line add_bounding_box ( self , boxes ) \u00b6 bbox\u662f\u6807\u8bb0\u5728k\u7ebf\u56fe\u4e0a\u67d0\u4e2a\u533a\u95f4\u5185\u7684\u77e9\u5f62\u6846\uff0c\u5b83\u4ee5\u8be5\u533a\u95f4\u6700\u9ad8\u4ef7\u548c\u6700\u4f4e\u4ef7\u4e3a\u4e0a\u4e0b\u8fb9\u3002 Parameters: Name Type Description Default boxes List[Tuple] \u6bcf\u4e2a\u5143\u7d20(start, width)\u8868\u793a\u5404\u4e2abbox\u7684\u8d77\u70b9\u548c\u5bbd\u5ea6\u3002 required Source code in omicron/plotting/candlestick.py def add_bounding_box ( self , boxes : List [ Tuple ]): \"\"\"bbox\u662f\u6807\u8bb0\u5728k\u7ebf\u56fe\u4e0a\u67d0\u4e2a\u533a\u95f4\u5185\u7684\u77e9\u5f62\u6846\uff0c\u5b83\u4ee5\u8be5\u533a\u95f4\u6700\u9ad8\u4ef7\u548c\u6700\u4f4e\u4ef7\u4e3a\u4e0a\u4e0b\u8fb9\u3002 Args: boxes: \u6bcf\u4e2a\u5143\u7d20(start, width)\u8868\u793a\u5404\u4e2abbox\u7684\u8d77\u70b9\u548c\u5bbd\u5ea6\u3002 \"\"\" for j , box in enumerate ( boxes ): x , y = [], [] i , width = box if len ( x ): x . append ( None ) y . append ( None ) group = self . bars [ i : i + width ] mean = np . mean ( group [ \"close\" ]) std = 2 * np . std ( group [ \"close\" ]) # \u843d\u5728\u4e24\u4e2a\u6807\u51c6\u5dee\u4ee5\u5185\u7684\u5b9e\u4f53\u6700\u4e0a\u65b9\u548c\u6700\u4e0b\u65b9\u503c hc = np . max ( group [ group [ \"close\" ] < mean + std ][ \"close\" ]) lc = np . min ( group [ group [ \"close\" ] > mean - std ][ \"close\" ]) ho = np . max ( group [ group [ \"open\" ] < mean + std ][ \"open\" ]) lo = np . min ( group [ group [ \"open\" ] > mean - std ][ \"open\" ]) h = max ( hc , ho ) low = min ( lo , lc ) x . extend ( self . ticks [[ i , i + width - 1 , i + width - 1 , i , i ]]) y . extend (( h , h , low , low , h )) hover = f \"\u5bbd\u5ea6: { width }
\u632f\u5e45: { h / low - 1 : .2% } \" trace = go . Scatter ( x = x , y = y , fill = \"toself\" , name = f \"\u5e73\u53f0\u6574\u7406 { j } \" , text = hover ) self . main_traces [ f \"bbox- { j } \" ] = trace add_indicator ( self , indicator , ** kwargs ) \u00b6 \u5411k\u7ebf\u56fe\u4e2d\u589e\u52a0\u6280\u672f\u6307\u6807 Parameters: Name Type Description Default indicator str \u5f53\u524d\u652f\u6301\u503c\u6709'volume', 'rsi', 'bbands' required kwargs \u8ba1\u7b97\u67d0\u4e2aindicator\u65f6\uff0c\u9700\u8981\u7684\u53c2\u6570\u3002\u6bd4\u5982\u8ba1\u7b97bbands\u65f6\uff0c\u9700\u8981\u4f20\u5165\u5747\u7ebf\u7684window {} Source code in omicron/plotting/candlestick.py def add_indicator ( self , indicator : str , ** kwargs ): \"\"\"\u5411k\u7ebf\u56fe\u4e2d\u589e\u52a0\u6280\u672f\u6307\u6807 Args: indicator: \u5f53\u524d\u652f\u6301\u503c\u6709'volume', 'rsi', 'bbands' kwargs: \u8ba1\u7b97\u67d0\u4e2aindicator\u65f6\uff0c\u9700\u8981\u7684\u53c2\u6570\u3002\u6bd4\u5982\u8ba1\u7b97bbands\u65f6\uff0c\u9700\u8981\u4f20\u5165\u5747\u7ebf\u7684window \"\"\" if indicator == \"volume\" : colors = np . repeat ( self . RED , len ( self . bars )) colors [ self . bars [ \"close\" ] <= self . bars [ \"open\" ]] = self . GREEN trace = go . Bar ( x = self . ticks , y = self . bars [ \"volume\" ], showlegend = False , marker = { \"color\" : colors }, ) elif indicator == \"rsi\" : win = kwargs . get ( \"win\" ) rsi = talib . RSI ( self . _bar_close , win ) # type: ignore trace = go . Scatter ( x = self . ticks , y = rsi , showlegend = False ) elif indicator == \"bbands\" : self . _remove_ma () win = kwargs . get ( \"win\" ) for name , ind in zip ( [ \"bbands-high\" , \"bbands-mean\" , \"bbands-low\" ], talib . BBANDS ( self . _bar_close , win ), # type: ignore ): trace = go . Scatter ( x = self . ticks , y = ind , showlegend = True , name = name ) self . main_traces [ name ] = trace return else : raise ValueError ( f \" { indicator } not supported\" ) self . ind_traces [ indicator ] = trace add_line ( self , trace_name , x , y ) \u00b6 \u5728k\u7ebf\u56fe\u4e0a\u589e\u52a0\u4ee5 x , y \u8868\u793a\u7684\u4e00\u6761\u76f4\u7ebf Parameters: Name Type Description Default trace_name \u56fe\u4f8b\u540d\u79f0 required x x\u8f74\u5750\u6807\uff0c\u6240\u6709\u7684x\u503c\u90fd\u5fc5\u987b\u5c5e\u4e8e[0, len(self.bars)] required y y\u503c required Source code in omicron/plotting/candlestick.py def add_line ( self , trace_name : str , x : List [ int ], y : List [ float ]): \"\"\"\u5728k\u7ebf\u56fe\u4e0a\u589e\u52a0\u4ee5`x`,`y`\u8868\u793a\u7684\u4e00\u6761\u76f4\u7ebf Args: trace_name : \u56fe\u4f8b\u540d\u79f0 x : x\u8f74\u5750\u6807\uff0c\u6240\u6709\u7684x\u503c\u90fd\u5fc5\u987b\u5c5e\u4e8e[0, len(self.bars)] y : y\u503c \"\"\" line = go . Scatter ( x = self . ticks [ x ], y = y , mode = \"lines\" , name = trace_name ) self . 
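# A minimal sketch of add_line and add_marks on an existing chart; assumes `cs`
# and `bars` from the usage examples on this page, and the coordinates below
# are illustrative.
import numpy as np

n = len(bars)
x = list(range(n - 30, n))                            # indices within [0, len(bars))
y = np.linspace(bars["close"][n - 30], bars["close"][n - 1], 30)
cs.add_line("trend line", x, list(y))
cs.add_marks([n - 20, n - 5], list(bars["high"][[n - 20, n - 5]] * 1.02), name="signals")
cs.plot()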
main_traces [ trace_name ] = line add_main_trace ( self , trace_name , ** kwargs ) \u00b6 add trace to main plot \u652f\u6301\u7684\u56fe\u4f8b\u7c7b\u522b\u6709peaks, bbox\uff08bounding-box), bt(\u56de\u6d4b), support_line, resist_line Parameters: Name Type Description Default trace_name \u56fe\u4f8b\u540d\u79f0 required **kwargs \u5176\u4ed6\u53c2\u6570 {} Source code in omicron/plotting/candlestick.py def add_main_trace ( self , trace_name : str , ** kwargs ): \"\"\"add trace to main plot \u652f\u6301\u7684\u56fe\u4f8b\u7c7b\u522b\u6709peaks, bbox\uff08bounding-box), bt(\u56de\u6d4b), support_line, resist_line Args: trace_name : \u56fe\u4f8b\u540d\u79f0 **kwargs : \u5176\u4ed6\u53c2\u6570 \"\"\" if trace_name == \"peaks\" : self . mark_peaks_and_valleys ( kwargs . get ( \"up_thres\" , 0.03 ), kwargs . get ( \"down_thres\" , - 0.03 ) ) # \u6807\u6ce8\u77e9\u5f62\u6846 elif trace_name == \"bbox\" : self . add_bounding_box ( kwargs . get ( \"boxes\" )) # \u56de\u6d4b\u7ed3\u679c elif trace_name == \"bt\" : self . add_backtest_result ( kwargs . get ( \"bt\" )) # \u589e\u52a0\u76f4\u7ebf elif trace_name == \"support_line\" : self . add_line ( \"\u652f\u6491\u7ebf\" , kwargs . get ( \"x\" ), kwargs . get ( \"y\" )) elif trace_name == \"resist_line\" : self . add_line ( \"\u538b\u529b\u7ebf\" , kwargs . get ( \"x\" ), kwargs . get ( \"y\" )) add_marks ( self , x , y , name , marker = 'cross' , color = None ) \u00b6 \u5411k\u7ebf\u56fe\u4e2d\u589e\u52a0\u6807\u8bb0\u70b9 Source code in omicron/plotting/candlestick.py def add_marks ( self , x : List [ int ], y : List [ float ], name : str , marker : str = \"cross\" , color : Optional [ str ] = None , ): \"\"\"\u5411k\u7ebf\u56fe\u4e2d\u589e\u52a0\u6807\u8bb0\u70b9\"\"\" trace = go . Scatter ( x = self . ticks [ x ], y = y , mode = \"markers\" , marker_symbol = marker , marker_color = color , name = name , ) self . main_traces [ name ] = trace mark_backtest_result ( self , result ) \u00b6 \u6807\u8bb0\u4e70\u5356\u70b9\u548c\u56de\u6d4b\u6570\u636e Todo \u6b64\u65b9\u6cd5\u53ef\u80fd\u672a\u4e0ebacktest\u8fd4\u56de\u503c\u540c\u6b65\u3002\u6b64\u5916\uff0c\u5728portofolio\u56de\u6d4b\u4e2d\uff0c\u4e0d\u53ef\u80fd\u5728k\u7ebf\u56fe\u4e2d\u4f7f\u7528\u6b64\u65b9\u6cd5\u3002 Parameters: Name Type Description Default points \u4e70\u5356\u70b9\u7684\u5750\u6807\u3002 required Source code in omicron/plotting/candlestick.py def mark_backtest_result ( self , result : dict ): \"\"\"\u6807\u8bb0\u4e70\u5356\u70b9\u548c\u56de\u6d4b\u6570\u636e TODO: \u6b64\u65b9\u6cd5\u53ef\u80fd\u672a\u4e0ebacktest\u8fd4\u56de\u503c\u540c\u6b65\u3002\u6b64\u5916\uff0c\u5728portofolio\u56de\u6d4b\u4e2d\uff0c\u4e0d\u53ef\u80fd\u5728k\u7ebf\u56fe\u4e2d\u4f7f\u7528\u6b64\u65b9\u6cd5\u3002 Args: points : \u4e70\u5356\u70b9\u7684\u5750\u6807\u3002 \"\"\" trades = result . get ( \"trades\" ) assets = result . get ( \"assets\" ) x , y , labels = [], [], [] hover = [] labels_color = defaultdict ( list ) for trade in trades : trade_date = arrow . get ( trade [ \"time\" ]) . date () asset = assets . get ( trade_date ) security = trade [ \"security\" ] price = trade [ \"price\" ] volume = trade [ \"volume\" ] side = trade [ \"order_side\" ] x . append ( self . _format_tick ( trade_date )) bar = self . bars [ self . bars [ \"frame\" ] == trade_date ] if side == \"\u4e70\u5165\" : hover . append ( f \"\u603b\u8d44\u4ea7: { asset }

{ side } : { security }
\u4e70\u5165\u4ef7: { price }
\u80a1\u6570: { volume } \" ) y . append ( bar [ \"high\" ][ 0 ] * 1.1 ) labels . append ( \"B\" ) labels_color [ \"color\" ] . append ( self . RED ) else : y . append ( bar [ \"low\" ][ 0 ] * 0.99 ) hover . append ( f \"\u603b\u8d44\u4ea7: { asset }

{ side } : { security }
\u5356\u51fa\u4ef7: { price }
\u80a1\u6570: { volume } \" ) labels . append ( \"S\" ) labels_color [ \"color\" ] . append ( self . GREEN ) labels_color . append ( self . GREEN ) # txt.append(f'{side}:{security}
\u5356\u51fa\u4ef7:{price}
\u80a1\u6570:{volume}') trace = go . Scatter ( x = x , y = y , mode = \"text\" , text = labels , name = \"backtest\" , hovertext = hover , textfont = labels_color , ) self . main_traces [ \"bs\" ] = trace mark_bbox ( self , min_size = 20 ) \u00b6 \u5728k\u7ebf\u56fe\u4e0a\u68c0\u6d4b\u5e76\u6807\u6ce8\u77e9\u5f62\u6846 Parameters: Name Type Description Default min_size \u77e9\u5f62\u6846\u7684\u6700\u5c0f\u957f\u5ea6 20 Source code in omicron/plotting/candlestick.py def mark_bbox ( self , min_size : int = 20 ): \"\"\"\u5728k\u7ebf\u56fe\u4e0a\u68c0\u6d4b\u5e76\u6807\u6ce8\u77e9\u5f62\u6846 Args: min_size : \u77e9\u5f62\u6846\u7684\u6700\u5c0f\u957f\u5ea6 \"\"\" boxes = plateaus ( self . _bar_close , min_size ) self . add_main_trace ( \"bbox\" , boxes = boxes ) mark_peaks_and_valleys ( self , up_thres = None , down_thres = None ) \u00b6 \u5728K\u7ebf\u56fe\u4e0a\u6807\u6ce8\u5cf0\u8c37\u70b9 Parameters: Name Type Description Default up_thres \u7528\u6765\u68c0\u6d4b\u5cf0\u8c37\u65f6\u4f7f\u7528\u7684\u9608\u503c\uff0c\u53c2\u89c1 omicron.talib.morph.peaks_and_valleys None down_thres \u7528\u6765\u68c0\u6d4b\u5cf0\u8c37\u65f6\u4f7f\u7528\u7684\u9608\u503c\uff0c\u53c2\u89c1 omicron.talib.morph.peaks_and_valleys None Source code in omicron/plotting/candlestick.py def mark_peaks_and_valleys ( self , up_thres : Optional [ float ] = None , down_thres : Optional [ float ] = None ): \"\"\"\u5728K\u7ebf\u56fe\u4e0a\u6807\u6ce8\u5cf0\u8c37\u70b9 Args: up_thres : \u7528\u6765\u68c0\u6d4b\u5cf0\u8c37\u65f6\u4f7f\u7528\u7684\u9608\u503c\uff0c\u53c2\u89c1[omicron.talib.morph.peaks_and_valleys][] down_thres : \u7528\u6765\u68c0\u6d4b\u5cf0\u8c37\u65f6\u4f7f\u7528\u7684\u9608\u503c\uff0c\u53c2\u89c1[omicron.talib.morph.peaks_and_valleys][] \"\"\" bars = self . bars flags = peaks_and_valleys ( self . _bar_close , up_thres , down_thres ) # \u79fb\u9664\u9996\u5c3e\u7684\u9876\u5e95\u6807\u8bb0\uff0c\u4e00\u822c\u60c5\u51b5\u4e0b\u5b83\u4eec\u90fd\u4e0d\u662f\u771f\u6b63\u7684\u9876\u548c\u5e95\u3002 flags [ 0 ] = 0 flags [ - 1 ] = 0 marker_margin = ( max ( bars [ \"high\" ]) - min ( bars [ \"low\" ])) * 0.05 ticks_up = self . ticks [ flags == 1 ] y_up = bars [ \"high\" ][ flags == 1 ] + marker_margin ticks_down = self . ticks [ flags == - 1 ] y_down = bars [ \"low\" ][ flags == - 1 ] - marker_margin trace = go . Scatter ( mode = \"markers\" , x = ticks_up , y = y_up , marker_symbol = \"triangle-down\" , name = \"\u5cf0\" ) self . main_traces [ \"peaks\" ] = trace trace = go . Scatter ( mode = \"markers\" , x = ticks_down , y = y_down , marker_symbol = \"triangle-up\" , name = \"\u8c37\" , ) self . main_traces [ \"valleys\" ] = trace mark_support_resist_lines ( self , upthres = None , downthres = None , use_close = True , win = 60 ) \u00b6 \u5728K\u7ebf\u56fe\u4e0a\u6807\u6ce8\u652f\u6491\u7ebf\u548c\u538b\u529b\u7ebf \u5728 win \u4e2ak\u7ebf\u5185\uff0c\u627e\u51fa\u6240\u6709\u7684\u5c40\u90e8\u5cf0\u8c37\u70b9\uff0c\u5e76\u4ee5\u6700\u9ad8\u7684\u4e24\u4e2a\u5cf0\u8fde\u7ebf\u751f\u6210\u538b\u529b\u7ebf\uff0c\u4ee5\u6700\u4f4e\u7684\u4e24\u4e2a\u8c37\u8fde\u7ebf\u751f\u6210\u652f\u6491\u7ebf\u3002 Parameters: Name Type Description Default upthres \u7528\u6765\u68c0\u6d4b\u5cf0\u8c37\u65f6\u4f7f\u7528\u7684\u9608\u503c\uff0c\u53c2\u89c1 omicron.talib.morph.peaks_and_valleys None downthres \u7528\u6765\u68c0\u6d4b\u5cf0\u8c37\u65f6\u4f7f\u7528\u7684\u9608\u503c\uff0c\u53c2\u89c1 omicron.talib.morph.peaks_and_valleys . 
None use_close \u662f\u5426\u4f7f\u7528\u6536\u76d8\u4ef7\u6765\u8fdb\u884c\u68c0\u6d4b\u3002\u5982\u679c\u4e3aFalse\uff0c\u5219\u4f7f\u7528high\u6765\u68c0\u6d4b\u538b\u529b\u7ebf\uff0c\u4f7f\u7528low\u6765\u68c0\u6d4b\u652f\u6491\u7ebf. True win \u68c0\u6d4b\u5c40\u90e8\u9ad8\u4f4e\u70b9\u7684\u7a97\u53e3. 60 Source code in omicron/plotting/candlestick.py def mark_support_resist_lines ( self , upthres : float = None , downthres : float = None , use_close = True , win = 60 ): \"\"\"\u5728K\u7ebf\u56fe\u4e0a\u6807\u6ce8\u652f\u6491\u7ebf\u548c\u538b\u529b\u7ebf \u5728`win`\u4e2ak\u7ebf\u5185\uff0c\u627e\u51fa\u6240\u6709\u7684\u5c40\u90e8\u5cf0\u8c37\u70b9\uff0c\u5e76\u4ee5\u6700\u9ad8\u7684\u4e24\u4e2a\u5cf0\u8fde\u7ebf\u751f\u6210\u538b\u529b\u7ebf\uff0c\u4ee5\u6700\u4f4e\u7684\u4e24\u4e2a\u8c37\u8fde\u7ebf\u751f\u6210\u652f\u6491\u7ebf\u3002 Args: upthres : \u7528\u6765\u68c0\u6d4b\u5cf0\u8c37\u65f6\u4f7f\u7528\u7684\u9608\u503c\uff0c\u53c2\u89c1`omicron.talib.morph.peaks_and_valleys` downthres : \u7528\u6765\u68c0\u6d4b\u5cf0\u8c37\u65f6\u4f7f\u7528\u7684\u9608\u503c\uff0c\u53c2\u89c1`omicron.talib.morph.peaks_and_valleys`. use_close : \u662f\u5426\u4f7f\u7528\u6536\u76d8\u4ef7\u6765\u8fdb\u884c\u68c0\u6d4b\u3002\u5982\u679c\u4e3aFalse\uff0c\u5219\u4f7f\u7528high\u6765\u68c0\u6d4b\u538b\u529b\u7ebf\uff0c\u4f7f\u7528low\u6765\u68c0\u6d4b\u652f\u6491\u7ebf. win : \u68c0\u6d4b\u5c40\u90e8\u9ad8\u4f4e\u70b9\u7684\u7a97\u53e3. \"\"\" bars = self . bars [ - win :] clipped = len ( self . bars ) - win if use_close : support , resist , x_start = support_resist_lines ( self . _bar_close , upthres , downthres ) x = np . arange ( len ( bars ))[ x_start :] self . add_main_trace ( \"support_line\" , x = x + clipped , y = support ( x )) self . add_main_trace ( \"resist_line\" , x = x + clipped , y = resist ( x )) else : # \u4f7f\u7528\"high\"\u548c\"low\" bars = self . bars [ - win :] support , _ , x_start = support_resist_lines ( bars [ \"low\" ], upthres , downthres ) x = np . arange ( len ( bars ))[ x_start :] self . add_main_trace ( \"support_line\" , x = x + clipped , y = support ( x )) _ , resist , x_start = support_resist_lines ( bars [ \"high\" ], upthres , downthres ) x = np . arange ( len ( bars ))[ x_start :] self . add_main_trace ( \"resist_line\" , x = x + clipped , y = resist ( x )) plot ( self ) \u00b6 \u7ed8\u5236\u56fe\u8868 Source code in omicron/plotting/candlestick.py def plot ( self ): \"\"\"\u7ed8\u5236\u56fe\u8868\"\"\" fig = self . figure fig . show ()","title":"CandleStick"},{"location":"api/plotting/candlestick/#omicron.plotting.candlestick--\u7528\u6cd5\u793a\u4f8b","text":"\u6ce8\u610f\u793a\u4f8b\u9700\u8981\u5728notebook\u4e2d\u8fd0\u884c\uff0c\u5426\u5219\u65e0\u6cd5\u751f\u6210\u56fe\u3002 1 2 3 4 5 from omicron.plotting.candlestick import Candlestick bars = await Stock . get_bars ( \"000001.XSHE\" , 120 , FrameType . DAY ) cs = Candlestick ( bars ) cs . plot () \u8fd9\u5c06\u751f\u6210\u4e0b\u56fe\uff1a \u9ed8\u8ba4\u5730\uff0c\u5c06\u663e\u793a\u6210\u4ea4\u91cf\u548cRSI\u6307\u6807\u4e24\u4e2a\u526f\u56fe\u3002\u53ef\u4ee5\u901a\u8fc7\u4ee5\u4e0b\u65b9\u5f0f\u6765\u5b9a\u5236\uff1a 1 2 3 4 5 cs = Candlestick ( bars , show_volume = True , show_rsi = True , show_peaks = False } cs . plot ()","title":"\u7528\u6cd5\u793a\u4f8b"},{"location":"api/plotting/candlestick/#omicron.plotting.candlestick--\u589e\u52a0\u6807\u8bb0","text":"1 2 3 4 5 6 7 8 9 10 11 from omicron.plotting.candlestick import Candlestick bars = await Stock . 
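# A minimal sketch combining support/resistance lines and plateau boxes;
# assumes a notebook context and the `bars` fetched in the examples on this
# page.
from omicron.plotting.candlestick import Candlestick

cs = Candlestick(bars, show_volume=False, show_rsi=False)
cs.mark_support_resist_lines(use_close=True, win=60)  # lines from the two highest peaks / two lowest valleys
cs.mark_bbox(min_size=20)                             # plateaus of at least 20 bars
cs.plot()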
get_bars ( \"000001.XSHE\" , 120 , FrameType . DAY ) cs = Candlestick ( bars , show_volume = True , show_rsi = False , show_peaks = True ) cs . add_marks ([ 20 , 50 ]) cs . plot () \u8fd9\u5c06\u5728k\u7ebf\u4e0a\u663e\u793a\u4e24\u4e2a\u52a0\u53f7\uff1a","title":"\u589e\u52a0\u6807\u8bb0"},{"location":"api/plotting/candlestick/#omicron.plotting.candlestick--\u663e\u793a\u5e03\u6797\u5e26","text":"1 2 3 4 5 6 7 8 9 10 11 from omicron.plotting.candlestick import Candlestick bars = await Stock . get_bars ( \"000001.XSHE\" , 120 , FrameType . DAY ) cs = Candlestick ( bars , show_volume = True , show_rsi = False , show_peaks = True ) cs . add_indicator ( \"bbands\" , 20 ) cs . plot ()","title":"\u663e\u793a\u5e03\u6797\u5e26"},{"location":"api/plotting/candlestick/#omicron.plotting.candlestick--\u663e\u793a\u5e73\u53f0","text":"1 2 3 4 5 6 7 8 9 10 11 12 from omicron.plotting.candlestick import Candlestick bars = await Stock . get_bars ( \"000001.XSHE\" , 120 , FrameType . DAY ) cs = Candlestick ( bars , show_volume = True , show_rsi = False , show_peaks = True ) cs . mark_bbox () cs . plot ()","title":"\u663e\u793a\u5e73\u53f0"},{"location":"api/plotting/candlestick/#omicron.plotting.candlestick.Candlestick","text":"Source code in omicron/plotting/candlestick.py class Candlestick : RED = \"#FF4136\" GREEN = \"#3DAA70\" TRANSPARENT = \"rgba(0,0,0,0)\" LIGHT_GRAY = \"rgba(0, 0, 0, 0.1)\" MA_COLORS = { 5 : \"#1432F5\" , 10 : \"#EB52F7\" , 20 : \"#C0C0C0\" , 30 : \"#882111\" , 60 : \"#5E8E28\" , 120 : \"#4294F7\" , 250 : \"#F09937\" , } def __init__ ( self , bars : np . ndarray , ma_groups : List [ int ] = None , title : str = None , show_volume = True , show_rsi = True , show_peaks = False , width = None , height = None , ** kwargs , ): \"\"\"\u6784\u9020\u51fd\u6570 Args: bars: \u884c\u60c5\u6570\u636e ma_groups: \u5747\u7ebf\u7ec4\u53c2\u6570\u3002\u6bd4\u5982[5, 10, 20]\u8868\u660e\u5411k\u7ebf\u56fe\u4e2d\u6dfb\u52a05, 10, 20\u65e5\u5747\u7ebf\u3002\u5982\u679c\u4e0d\u63d0\u4f9b\uff0c\u5c06\u4ece\u6570\u7ec4[5, 10, 20, 30, 60, 120, 250]\u4e2d\u53d6\u76f4\u5230\u4e0e`len(bars) - 5`\u5339\u914d\u7684\u53c2\u6570\u4e3a\u6b62\u3002\u6bd4\u5982bars\u957f\u5ea6\u4e3a30\uff0c\u5219\u5c06\u53d6[5, 10, 20]\u6765\u7ed8\u5236\u5747\u7ebf\u3002 title: k\u7ebf\u56fe\u7684\u6807\u9898 show_volume: \u662f\u5426\u663e\u793a\u6210\u4ea4\u91cf\u56fe show_rsi: \u662f\u5426\u663e\u793aRSI\u56fe\u3002\u7f3a\u7701\u663e\u793a\u53c2\u6570\u4e3a6\u7684RSI\u56fe\u3002 show_peaks: \u662f\u5426\u6807\u8bb0\u68c0\u6d4b\u51fa\u6765\u7684\u5cf0\u8ddf\u8c37\u3002 width: the width in 'px' units of the figure height: the height in 'px' units of the figure Keyword Args: rsi_win int: default is 6 \"\"\" self . title = title self . bars = bars self . width = width self . height = height # traces for main area self . main_traces = {} # traces for indicator area self . ind_traces = {} self . ticks = self . _format_tick ( bars [ \"frame\" ]) self . _bar_close = array_math_round ( bars [ \"close\" ], 2 ) . astype ( np . float64 ) # for every candlestick, it must contain a candlestick plot cs = go . Candlestick ( x = self . ticks , open = bars [ \"open\" ], high = bars [ \"high\" ], low = bars [ \"low\" ], close = self . _bar_close , line = dict ({ \"width\" : 1 }), name = \"K\u7ebf\" , ** kwargs , ) # Set line and fill colors cs . increasing . fillcolor = \"rgba(255,255,255,0.9)\" cs . increasing . line . color = self . RED cs . decreasing . fillcolor = self . GREEN cs . decreasing . line . color = self . GREEN self . 
main_traces [ \"ohlc\" ] = cs if show_volume : self . add_indicator ( \"volume\" ) if show_peaks : self . add_main_trace ( \"peaks\" ) if show_rsi : self . add_indicator ( \"rsi\" , win = kwargs . get ( \"rsi_win\" , 6 )) # \u589e\u52a0\u5747\u7ebf if ma_groups is None : nbars = len ( bars ) if nbars < 9 : ma_groups = [] else : groups = np . array ([ 5 , 10 , 20 , 30 , 60 , 120 , 250 ]) idx = max ( np . argwhere ( groups < ( nbars - 5 ))) . item () + 1 ma_groups = groups [: idx ] for win in ma_groups : name = f \"ma { win } \" if win > len ( bars ): continue ma = moving_average ( self . _bar_close , win ) line = go . Scatter ( y = ma , x = self . ticks , name = name , line = dict ( width = 1 , color = self . MA_COLORS . get ( win )), ) self . main_traces [ name ] = line @property def figure ( self ): \"\"\"\u8fd4\u56de\u4e00\u4e2afigure\u5bf9\u8c61\"\"\" rows = len ( self . ind_traces ) + 1 specs = [[{ \"secondary_y\" : False }]] * rows specs [ 0 ][ 0 ][ \"secondary_y\" ] = True row_heights = [ 0.7 , * ([ 0.3 / ( rows - 1 )] * ( rows - 1 ))] print ( row_heights ) cols = 1 fig = make_subplots ( rows = rows , cols = cols , shared_xaxes = True , vertical_spacing = 0.1 , subplot_titles = ( self . title , * self . ind_traces . keys ()), row_heights = row_heights , specs = specs , ) for _ , trace in self . main_traces . items (): fig . add_trace ( trace , row = 1 , col = 1 ) for i , ( _ , trace ) in enumerate ( self . ind_traces . items ()): fig . add_trace ( trace , row = i + 2 , col = 1 ) ymin = np . min ( self . bars [ \"low\" ]) ymax = np . max ( self . bars [ \"high\" ]) ylim = [ ymin * 0.95 , ymax * 1.05 ] # \u663e\u793a\u5341\u5b57\u5149\u6807 fig . update_xaxes ( showgrid = False , showspikes = True , spikemode = \"across\" , spikesnap = \"cursor\" , spikecolor = \"grey\" , spikedash = \"solid\" , spikethickness = 1 , ) fig . update_yaxes ( showspikes = True , spikemode = \"across\" , spikesnap = \"cursor\" , spikedash = \"solid\" , spikecolor = \"grey\" , spikethickness = 1 , showgrid = True , gridcolor = self . LIGHT_GRAY , ) fig . update_xaxes ( nticks = len ( self . bars ) // 10 , ticklen = 10 , ticks = \"outside\" , minor = dict ( nticks = 5 , ticklen = 5 , ticks = \"outside\" ), row = rows , col = 1 , ) # \u8bbe\u7f6eK\u7ebf\u663e\u793a\u533a\u57df if self . width : win_size = int ( self . width // 10 ) else : win_size = 120 fig . update_xaxes ( type = \"category\" , range = [ len ( self . bars ) - win_size , len ( self . bars ) - 1 ] ) fig . update_layout ( yaxis = dict ( range = ylim ), hovermode = \"x unified\" , plot_bgcolor = self . TRANSPARENT , xaxis_rangeslider_visible = False , ) if self . width : fig . update_layout ( width = self . width ) if self . height : fig . update_layout ( height = self . height ) return fig def _format_tick ( self , tm : np . array ) -> NDArray : if tm . item ( 0 ) . hour == 0 : # assume it's date return np . array ( [ f \" { x . item () . year : 02 } - { x . item () . month : 02 } - { x . item () . day : 02 } \" for x in tm ] ) else : return np . array ( [ f \" { x . item () . month : 02 } - { x . item () . day : 02 } { x . item () . hour : 02 } : { x . item () . minute : 02 } \" for x in tm ] ) def _remove_ma ( self ): traces = {} for name in self . main_traces : if not name . startswith ( \"ma\" ): traces [ name ] = self . main_traces [ name ] self . 
main_traces = traces def add_main_trace ( self , trace_name : str , ** kwargs ): \"\"\"add trace to main plot \u652f\u6301\u7684\u56fe\u4f8b\u7c7b\u522b\u6709peaks, bbox\uff08bounding-box), bt(\u56de\u6d4b), support_line, resist_line Args: trace_name : \u56fe\u4f8b\u540d\u79f0 **kwargs : \u5176\u4ed6\u53c2\u6570 \"\"\" if trace_name == \"peaks\" : self . mark_peaks_and_valleys ( kwargs . get ( \"up_thres\" , 0.03 ), kwargs . get ( \"down_thres\" , - 0.03 ) ) # \u6807\u6ce8\u77e9\u5f62\u6846 elif trace_name == \"bbox\" : self . add_bounding_box ( kwargs . get ( \"boxes\" )) # \u56de\u6d4b\u7ed3\u679c elif trace_name == \"bt\" : self . add_backtest_result ( kwargs . get ( \"bt\" )) # \u589e\u52a0\u76f4\u7ebf elif trace_name == \"support_line\" : self . add_line ( \"\u652f\u6491\u7ebf\" , kwargs . get ( \"x\" ), kwargs . get ( \"y\" )) elif trace_name == \"resist_line\" : self . add_line ( \"\u538b\u529b\u7ebf\" , kwargs . get ( \"x\" ), kwargs . get ( \"y\" )) def add_line ( self , trace_name : str , x : List [ int ], y : List [ float ]): \"\"\"\u5728k\u7ebf\u56fe\u4e0a\u589e\u52a0\u4ee5`x`,`y`\u8868\u793a\u7684\u4e00\u6761\u76f4\u7ebf Args: trace_name : \u56fe\u4f8b\u540d\u79f0 x : x\u8f74\u5750\u6807\uff0c\u6240\u6709\u7684x\u503c\u90fd\u5fc5\u987b\u5c5e\u4e8e[0, len(self.bars)] y : y\u503c \"\"\" line = go . Scatter ( x = self . ticks [ x ], y = y , mode = \"lines\" , name = trace_name ) self . main_traces [ trace_name ] = line def mark_support_resist_lines ( self , upthres : float = None , downthres : float = None , use_close = True , win = 60 ): \"\"\"\u5728K\u7ebf\u56fe\u4e0a\u6807\u6ce8\u652f\u6491\u7ebf\u548c\u538b\u529b\u7ebf \u5728`win`\u4e2ak\u7ebf\u5185\uff0c\u627e\u51fa\u6240\u6709\u7684\u5c40\u90e8\u5cf0\u8c37\u70b9\uff0c\u5e76\u4ee5\u6700\u9ad8\u7684\u4e24\u4e2a\u5cf0\u8fde\u7ebf\u751f\u6210\u538b\u529b\u7ebf\uff0c\u4ee5\u6700\u4f4e\u7684\u4e24\u4e2a\u8c37\u8fde\u7ebf\u751f\u6210\u652f\u6491\u7ebf\u3002 Args: upthres : \u7528\u6765\u68c0\u6d4b\u5cf0\u8c37\u65f6\u4f7f\u7528\u7684\u9608\u503c\uff0c\u53c2\u89c1`omicron.talib.morph.peaks_and_valleys` downthres : \u7528\u6765\u68c0\u6d4b\u5cf0\u8c37\u65f6\u4f7f\u7528\u7684\u9608\u503c\uff0c\u53c2\u89c1`omicron.talib.morph.peaks_and_valleys`. use_close : \u662f\u5426\u4f7f\u7528\u6536\u76d8\u4ef7\u6765\u8fdb\u884c\u68c0\u6d4b\u3002\u5982\u679c\u4e3aFalse\uff0c\u5219\u4f7f\u7528high\u6765\u68c0\u6d4b\u538b\u529b\u7ebf\uff0c\u4f7f\u7528low\u6765\u68c0\u6d4b\u652f\u6491\u7ebf. win : \u68c0\u6d4b\u5c40\u90e8\u9ad8\u4f4e\u70b9\u7684\u7a97\u53e3. \"\"\" bars = self . bars [ - win :] clipped = len ( self . bars ) - win if use_close : support , resist , x_start = support_resist_lines ( self . _bar_close , upthres , downthres ) x = np . arange ( len ( bars ))[ x_start :] self . add_main_trace ( \"support_line\" , x = x + clipped , y = support ( x )) self . add_main_trace ( \"resist_line\" , x = x + clipped , y = resist ( x )) else : # \u4f7f\u7528\"high\"\u548c\"low\" bars = self . bars [ - win :] support , _ , x_start = support_resist_lines ( bars [ \"low\" ], upthres , downthres ) x = np . arange ( len ( bars ))[ x_start :] self . add_main_trace ( \"support_line\" , x = x + clipped , y = support ( x )) _ , resist , x_start = support_resist_lines ( bars [ \"high\" ], upthres , downthres ) x = np . arange ( len ( bars ))[ x_start :] self . 
add_main_trace ( \"resist_line\" , x = x + clipped , y = resist ( x )) def mark_bbox ( self , min_size : int = 20 ): \"\"\"\u5728k\u7ebf\u56fe\u4e0a\u68c0\u6d4b\u5e76\u6807\u6ce8\u77e9\u5f62\u6846 Args: min_size : \u77e9\u5f62\u6846\u7684\u6700\u5c0f\u957f\u5ea6 \"\"\" boxes = plateaus ( self . _bar_close , min_size ) self . add_main_trace ( \"bbox\" , boxes = boxes ) def mark_backtest_result ( self , result : dict ): \"\"\"\u6807\u8bb0\u4e70\u5356\u70b9\u548c\u56de\u6d4b\u6570\u636e TODO: \u6b64\u65b9\u6cd5\u53ef\u80fd\u672a\u4e0ebacktest\u8fd4\u56de\u503c\u540c\u6b65\u3002\u6b64\u5916\uff0c\u5728portofolio\u56de\u6d4b\u4e2d\uff0c\u4e0d\u53ef\u80fd\u5728k\u7ebf\u56fe\u4e2d\u4f7f\u7528\u6b64\u65b9\u6cd5\u3002 Args: points : \u4e70\u5356\u70b9\u7684\u5750\u6807\u3002 \"\"\" trades = result . get ( \"trades\" ) assets = result . get ( \"assets\" ) x , y , labels = [], [], [] hover = [] labels_color = defaultdict ( list ) for trade in trades : trade_date = arrow . get ( trade [ \"time\" ]) . date () asset = assets . get ( trade_date ) security = trade [ \"security\" ] price = trade [ \"price\" ] volume = trade [ \"volume\" ] side = trade [ \"order_side\" ] x . append ( self . _format_tick ( trade_date )) bar = self . bars [ self . bars [ \"frame\" ] == trade_date ] if side == \"\u4e70\u5165\" : hover . append ( f \"\u603b\u8d44\u4ea7: { asset }

{ side } : { security }
\u4e70\u5165\u4ef7: { price }
\u80a1\u6570: { volume } \" ) y . append ( bar [ \"high\" ][ 0 ] * 1.1 ) labels . append ( \"B\" ) labels_color [ \"color\" ] . append ( self . RED ) else : y . append ( bar [ \"low\" ][ 0 ] * 0.99 ) hover . append ( f \"\u603b\u8d44\u4ea7: { asset }

{ side } : { security }
\u5356\u51fa\u4ef7: { price }
\u80a1\u6570: { volume } \" ) labels . append ( \"S\" ) labels_color [ \"color\" ] . append ( self . GREEN ) labels_color . append ( self . GREEN ) # txt.append(f'{side}:{security}
\u5356\u51fa\u4ef7:{price}
\u80a1\u6570:{volume}') trace = go . Scatter ( x = x , y = y , mode = \"text\" , text = labels , name = \"backtest\" , hovertext = hover , textfont = labels_color , ) self . main_traces [ \"bs\" ] = trace def mark_peaks_and_valleys ( self , up_thres : Optional [ float ] = None , down_thres : Optional [ float ] = None ): \"\"\"\u5728K\u7ebf\u56fe\u4e0a\u6807\u6ce8\u5cf0\u8c37\u70b9 Args: up_thres : \u7528\u6765\u68c0\u6d4b\u5cf0\u8c37\u65f6\u4f7f\u7528\u7684\u9608\u503c\uff0c\u53c2\u89c1[omicron.talib.morph.peaks_and_valleys][] down_thres : \u7528\u6765\u68c0\u6d4b\u5cf0\u8c37\u65f6\u4f7f\u7528\u7684\u9608\u503c\uff0c\u53c2\u89c1[omicron.talib.morph.peaks_and_valleys][] \"\"\" bars = self . bars flags = peaks_and_valleys ( self . _bar_close , up_thres , down_thres ) # \u79fb\u9664\u9996\u5c3e\u7684\u9876\u5e95\u6807\u8bb0\uff0c\u4e00\u822c\u60c5\u51b5\u4e0b\u5b83\u4eec\u90fd\u4e0d\u662f\u771f\u6b63\u7684\u9876\u548c\u5e95\u3002 flags [ 0 ] = 0 flags [ - 1 ] = 0 marker_margin = ( max ( bars [ \"high\" ]) - min ( bars [ \"low\" ])) * 0.05 ticks_up = self . ticks [ flags == 1 ] y_up = bars [ \"high\" ][ flags == 1 ] + marker_margin ticks_down = self . ticks [ flags == - 1 ] y_down = bars [ \"low\" ][ flags == - 1 ] - marker_margin trace = go . Scatter ( mode = \"markers\" , x = ticks_up , y = y_up , marker_symbol = \"triangle-down\" , name = \"\u5cf0\" ) self . main_traces [ \"peaks\" ] = trace trace = go . Scatter ( mode = \"markers\" , x = ticks_down , y = y_down , marker_symbol = \"triangle-up\" , name = \"\u8c37\" , ) self . main_traces [ \"valleys\" ] = trace def add_bounding_box ( self , boxes : List [ Tuple ]): \"\"\"bbox\u662f\u6807\u8bb0\u5728k\u7ebf\u56fe\u4e0a\u67d0\u4e2a\u533a\u95f4\u5185\u7684\u77e9\u5f62\u6846\uff0c\u5b83\u4ee5\u8be5\u533a\u95f4\u6700\u9ad8\u4ef7\u548c\u6700\u4f4e\u4ef7\u4e3a\u4e0a\u4e0b\u8fb9\u3002 Args: boxes: \u6bcf\u4e2a\u5143\u7d20(start, width)\u8868\u793a\u5404\u4e2abbox\u7684\u8d77\u70b9\u548c\u5bbd\u5ea6\u3002 \"\"\" for j , box in enumerate ( boxes ): x , y = [], [] i , width = box if len ( x ): x . append ( None ) y . append ( None ) group = self . bars [ i : i + width ] mean = np . mean ( group [ \"close\" ]) std = 2 * np . std ( group [ \"close\" ]) # \u843d\u5728\u4e24\u4e2a\u6807\u51c6\u5dee\u4ee5\u5185\u7684\u5b9e\u4f53\u6700\u4e0a\u65b9\u548c\u6700\u4e0b\u65b9\u503c hc = np . max ( group [ group [ \"close\" ] < mean + std ][ \"close\" ]) lc = np . min ( group [ group [ \"close\" ] > mean - std ][ \"close\" ]) ho = np . max ( group [ group [ \"open\" ] < mean + std ][ \"open\" ]) lo = np . min ( group [ group [ \"open\" ] > mean - std ][ \"open\" ]) h = max ( hc , ho ) low = min ( lo , lc ) x . extend ( self . ticks [[ i , i + width - 1 , i + width - 1 , i , i ]]) y . extend (( h , h , low , low , h )) hover = f \"\u5bbd\u5ea6: { width }
\u632f\u5e45: { h / low - 1 : .2% } \" trace = go . Scatter ( x = x , y = y , fill = \"toself\" , name = f \"\u5e73\u53f0\u6574\u7406 { j } \" , text = hover ) self . main_traces [ f \"bbox- { j } \" ] = trace def add_indicator ( self , indicator : str , ** kwargs ): \"\"\"\u5411k\u7ebf\u56fe\u4e2d\u589e\u52a0\u6280\u672f\u6307\u6807 Args: indicator: \u5f53\u524d\u652f\u6301\u503c\u6709'volume', 'rsi', 'bbands' kwargs: \u8ba1\u7b97\u67d0\u4e2aindicator\u65f6\uff0c\u9700\u8981\u7684\u53c2\u6570\u3002\u6bd4\u5982\u8ba1\u7b97bbands\u65f6\uff0c\u9700\u8981\u4f20\u5165\u5747\u7ebf\u7684window \"\"\" if indicator == \"volume\" : colors = np . repeat ( self . RED , len ( self . bars )) colors [ self . bars [ \"close\" ] <= self . bars [ \"open\" ]] = self . GREEN trace = go . Bar ( x = self . ticks , y = self . bars [ \"volume\" ], showlegend = False , marker = { \"color\" : colors }, ) elif indicator == \"rsi\" : win = kwargs . get ( \"win\" ) rsi = talib . RSI ( self . _bar_close , win ) # type: ignore trace = go . Scatter ( x = self . ticks , y = rsi , showlegend = False ) elif indicator == \"bbands\" : self . _remove_ma () win = kwargs . get ( \"win\" ) for name , ind in zip ( [ \"bbands-high\" , \"bbands-mean\" , \"bbands-low\" ], talib . BBANDS ( self . _bar_close , win ), # type: ignore ): trace = go . Scatter ( x = self . ticks , y = ind , showlegend = True , name = name ) self . main_traces [ name ] = trace return else : raise ValueError ( f \" { indicator } not supported\" ) self . ind_traces [ indicator ] = trace def add_marks ( self , x : List [ int ], y : List [ float ], name : str , marker : str = \"cross\" , color : Optional [ str ] = None , ): \"\"\"\u5411k\u7ebf\u56fe\u4e2d\u589e\u52a0\u6807\u8bb0\u70b9\"\"\" trace = go . Scatter ( x = self . ticks [ x ], y = y , mode = \"markers\" , marker_symbol = marker , marker_color = color , name = name , ) self . main_traces [ name ] = trace def plot ( self ): \"\"\"\u7ed8\u5236\u56fe\u8868\"\"\" fig = self . figure fig . show ()","title":"Candlestick"},{"location":"api/plotting/candlestick/#omicron.plotting.candlestick.Candlestick.figure","text":"\u8fd4\u56de\u4e00\u4e2afigure\u5bf9\u8c61","title":"figure"},{"location":"api/plotting/candlestick/#omicron.plotting.candlestick.Candlestick.__init__","text":"\u6784\u9020\u51fd\u6570 Parameters: Name Type Description Default bars ndarray \u884c\u60c5\u6570\u636e required ma_groups List[int] \u5747\u7ebf\u7ec4\u53c2\u6570\u3002\u6bd4\u5982[5, 10, 20]\u8868\u660e\u5411k\u7ebf\u56fe\u4e2d\u6dfb\u52a05, 10, 20\u65e5\u5747\u7ebf\u3002\u5982\u679c\u4e0d\u63d0\u4f9b\uff0c\u5c06\u4ece\u6570\u7ec4[5, 10, 20, 30, 60, 120, 250]\u4e2d\u53d6\u76f4\u5230\u4e0e len(bars) - 5 \u5339\u914d\u7684\u53c2\u6570\u4e3a\u6b62\u3002\u6bd4\u5982bars\u957f\u5ea6\u4e3a30\uff0c\u5219\u5c06\u53d6[5, 10, 20]\u6765\u7ed8\u5236\u5747\u7ebf\u3002 None title str k\u7ebf\u56fe\u7684\u6807\u9898 None show_volume \u662f\u5426\u663e\u793a\u6210\u4ea4\u91cf\u56fe True show_rsi \u662f\u5426\u663e\u793aRSI\u56fe\u3002\u7f3a\u7701\u663e\u793a\u53c2\u6570\u4e3a6\u7684RSI\u56fe\u3002 True show_peaks \u662f\u5426\u6807\u8bb0\u68c0\u6d4b\u51fa\u6765\u7684\u5cf0\u8ddf\u8c37\u3002 False width the width in 'px' units of the figure None height the height in 'px' units of the figure None Keyword arguments: Name Type Description rsi_win int default is 6 Source code in omicron/plotting/candlestick.py def __init__ ( self , bars : np . 
ndarray , ma_groups : List [ int ] = None , title : str = None , show_volume = True , show_rsi = True , show_peaks = False , width = None , height = None , ** kwargs , ): \"\"\"\u6784\u9020\u51fd\u6570 Args: bars: \u884c\u60c5\u6570\u636e ma_groups: \u5747\u7ebf\u7ec4\u53c2\u6570\u3002\u6bd4\u5982[5, 10, 20]\u8868\u660e\u5411k\u7ebf\u56fe\u4e2d\u6dfb\u52a05, 10, 20\u65e5\u5747\u7ebf\u3002\u5982\u679c\u4e0d\u63d0\u4f9b\uff0c\u5c06\u4ece\u6570\u7ec4[5, 10, 20, 30, 60, 120, 250]\u4e2d\u53d6\u76f4\u5230\u4e0e`len(bars) - 5`\u5339\u914d\u7684\u53c2\u6570\u4e3a\u6b62\u3002\u6bd4\u5982bars\u957f\u5ea6\u4e3a30\uff0c\u5219\u5c06\u53d6[5, 10, 20]\u6765\u7ed8\u5236\u5747\u7ebf\u3002 title: k\u7ebf\u56fe\u7684\u6807\u9898 show_volume: \u662f\u5426\u663e\u793a\u6210\u4ea4\u91cf\u56fe show_rsi: \u662f\u5426\u663e\u793aRSI\u56fe\u3002\u7f3a\u7701\u663e\u793a\u53c2\u6570\u4e3a6\u7684RSI\u56fe\u3002 show_peaks: \u662f\u5426\u6807\u8bb0\u68c0\u6d4b\u51fa\u6765\u7684\u5cf0\u8ddf\u8c37\u3002 width: the width in 'px' units of the figure height: the height in 'px' units of the figure Keyword Args: rsi_win int: default is 6 \"\"\" self . title = title self . bars = bars self . width = width self . height = height # traces for main area self . main_traces = {} # traces for indicator area self . ind_traces = {} self . ticks = self . _format_tick ( bars [ \"frame\" ]) self . _bar_close = array_math_round ( bars [ \"close\" ], 2 ) . astype ( np . float64 ) # for every candlestick, it must contain a candlestick plot cs = go . Candlestick ( x = self . ticks , open = bars [ \"open\" ], high = bars [ \"high\" ], low = bars [ \"low\" ], close = self . _bar_close , line = dict ({ \"width\" : 1 }), name = \"K\u7ebf\" , ** kwargs , ) # Set line and fill colors cs . increasing . fillcolor = \"rgba(255,255,255,0.9)\" cs . increasing . line . color = self . RED cs . decreasing . fillcolor = self . GREEN cs . decreasing . line . color = self . GREEN self . main_traces [ \"ohlc\" ] = cs if show_volume : self . add_indicator ( \"volume\" ) if show_peaks : self . add_main_trace ( \"peaks\" ) if show_rsi : self . add_indicator ( \"rsi\" , win = kwargs . get ( \"rsi_win\" , 6 )) # \u589e\u52a0\u5747\u7ebf if ma_groups is None : nbars = len ( bars ) if nbars < 9 : ma_groups = [] else : groups = np . array ([ 5 , 10 , 20 , 30 , 60 , 120 , 250 ]) idx = max ( np . argwhere ( groups < ( nbars - 5 ))) . item () + 1 ma_groups = groups [: idx ] for win in ma_groups : name = f \"ma { win } \" if win > len ( bars ): continue ma = moving_average ( self . _bar_close , win ) line = go . Scatter ( y = ma , x = self . ticks , name = name , line = dict ( width = 1 , color = self . MA_COLORS . get ( win )), ) self . 
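A minimal construction sketch, assuming `bars` is day-level OHLC data already fetched (for example via Stock.get_bars) and that Candlestick is importable from omicron.plotting, as MetricsGraph is: from omicron.plotting import Candlestick # bars: an OHLC array fetched elsewhere, e.g. await Stock.get_bars(code, 120, FrameType.DAY, end) cs = Candlestick(bars, ma_groups=[5, 10, 20], title="demo", show_volume=True, show_rsi=True) cs.plot()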
main_traces [ name ] = line","title":"__init__()"},{"location":"api/plotting/candlestick/#omicron.plotting.candlestick.Candlestick.add_bounding_box","text":"bbox\u662f\u6807\u8bb0\u5728k\u7ebf\u56fe\u4e0a\u67d0\u4e2a\u533a\u95f4\u5185\u7684\u77e9\u5f62\u6846\uff0c\u5b83\u4ee5\u8be5\u533a\u95f4\u6700\u9ad8\u4ef7\u548c\u6700\u4f4e\u4ef7\u4e3a\u4e0a\u4e0b\u8fb9\u3002 Parameters: Name Type Description Default boxes List[Tuple] \u6bcf\u4e2a\u5143\u7d20(start, width)\u8868\u793a\u5404\u4e2abbox\u7684\u8d77\u70b9\u548c\u5bbd\u5ea6\u3002 required Source code in omicron/plotting/candlestick.py def add_bounding_box ( self , boxes : List [ Tuple ]): \"\"\"bbox\u662f\u6807\u8bb0\u5728k\u7ebf\u56fe\u4e0a\u67d0\u4e2a\u533a\u95f4\u5185\u7684\u77e9\u5f62\u6846\uff0c\u5b83\u4ee5\u8be5\u533a\u95f4\u6700\u9ad8\u4ef7\u548c\u6700\u4f4e\u4ef7\u4e3a\u4e0a\u4e0b\u8fb9\u3002 Args: boxes: \u6bcf\u4e2a\u5143\u7d20(start, width)\u8868\u793a\u5404\u4e2abbox\u7684\u8d77\u70b9\u548c\u5bbd\u5ea6\u3002 \"\"\" for j , box in enumerate ( boxes ): x , y = [], [] i , width = box if len ( x ): x . append ( None ) y . append ( None ) group = self . bars [ i : i + width ] mean = np . mean ( group [ \"close\" ]) std = 2 * np . std ( group [ \"close\" ]) # \u843d\u5728\u4e24\u4e2a\u6807\u51c6\u5dee\u4ee5\u5185\u7684\u5b9e\u4f53\u6700\u4e0a\u65b9\u548c\u6700\u4e0b\u65b9\u503c hc = np . max ( group [ group [ \"close\" ] < mean + std ][ \"close\" ]) lc = np . min ( group [ group [ \"close\" ] > mean - std ][ \"close\" ]) ho = np . max ( group [ group [ \"open\" ] < mean + std ][ \"open\" ]) lo = np . min ( group [ group [ \"open\" ] > mean - std ][ \"open\" ]) h = max ( hc , ho ) low = min ( lo , lc ) x . extend ( self . ticks [[ i , i + width - 1 , i + width - 1 , i , i ]]) y . extend (( h , h , low , low , h )) hover = f \"\u5bbd\u5ea6: { width }
\u632f\u5e45: { h / low - 1 : .2% } \" trace = go . Scatter ( x = x , y = y , fill = \"toself\" , name = f \"\u5e73\u53f0\u6574\u7406 { j } \" , text = hover ) self . main_traces [ f \"bbox- { j } \" ] = trace","title":"add_bounding_box()"},{"location":"api/plotting/candlestick/#omicron.plotting.candlestick.Candlestick.add_indicator","text":"\u5411k\u7ebf\u56fe\u4e2d\u589e\u52a0\u6280\u672f\u6307\u6807 Parameters: Name Type Description Default indicator str \u5f53\u524d\u652f\u6301\u503c\u6709'volume', 'rsi', 'bbands' required kwargs \u8ba1\u7b97\u67d0\u4e2aindicator\u65f6\uff0c\u9700\u8981\u7684\u53c2\u6570\u3002\u6bd4\u5982\u8ba1\u7b97bbands\u65f6\uff0c\u9700\u8981\u4f20\u5165\u5747\u7ebf\u7684window {} Source code in omicron/plotting/candlestick.py def add_indicator ( self , indicator : str , ** kwargs ): \"\"\"\u5411k\u7ebf\u56fe\u4e2d\u589e\u52a0\u6280\u672f\u6307\u6807 Args: indicator: \u5f53\u524d\u652f\u6301\u503c\u6709'volume', 'rsi', 'bbands' kwargs: \u8ba1\u7b97\u67d0\u4e2aindicator\u65f6\uff0c\u9700\u8981\u7684\u53c2\u6570\u3002\u6bd4\u5982\u8ba1\u7b97bbands\u65f6\uff0c\u9700\u8981\u4f20\u5165\u5747\u7ebf\u7684window \"\"\" if indicator == \"volume\" : colors = np . repeat ( self . RED , len ( self . bars )) colors [ self . bars [ \"close\" ] <= self . bars [ \"open\" ]] = self . GREEN trace = go . Bar ( x = self . ticks , y = self . bars [ \"volume\" ], showlegend = False , marker = { \"color\" : colors }, ) elif indicator == \"rsi\" : win = kwargs . get ( \"win\" ) rsi = talib . RSI ( self . _bar_close , win ) # type: ignore trace = go . Scatter ( x = self . ticks , y = rsi , showlegend = False ) elif indicator == \"bbands\" : self . _remove_ma () win = kwargs . get ( \"win\" ) for name , ind in zip ( [ \"bbands-high\" , \"bbands-mean\" , \"bbands-low\" ], talib . BBANDS ( self . _bar_close , win ), # type: ignore ): trace = go . Scatter ( x = self . ticks , y = ind , showlegend = True , name = name ) self . main_traces [ name ] = trace return else : raise ValueError ( f \" { indicator } not supported\" ) self . ind_traces [ indicator ] = trace","title":"add_indicator()"},{"location":"api/plotting/candlestick/#omicron.plotting.candlestick.Candlestick.add_line","text":"\u5728k\u7ebf\u56fe\u4e0a\u589e\u52a0\u4ee5 x , y \u8868\u793a\u7684\u4e00\u6761\u76f4\u7ebf Parameters: Name Type Description Default trace_name \u56fe\u4f8b\u540d\u79f0 required x x\u8f74\u5750\u6807\uff0c\u6240\u6709\u7684x\u503c\u90fd\u5fc5\u987b\u5c5e\u4e8e[0, len(self.bars)] required y y\u503c required Source code in omicron/plotting/candlestick.py def add_line ( self , trace_name : str , x : List [ int ], y : List [ float ]): \"\"\"\u5728k\u7ebf\u56fe\u4e0a\u589e\u52a0\u4ee5`x`,`y`\u8868\u793a\u7684\u4e00\u6761\u76f4\u7ebf Args: trace_name : \u56fe\u4f8b\u540d\u79f0 x : x\u8f74\u5750\u6807\uff0c\u6240\u6709\u7684x\u503c\u90fd\u5fc5\u987b\u5c5e\u4e8e[0, len(self.bars)] y : y\u503c \"\"\" line = go . Scatter ( x = self . ticks [ x ], y = y , mode = \"lines\" , name = trace_name ) self . 
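For example, a sketch based on the add_indicator signature above (the 20-bar window is an arbitrary choice): cs = Candlestick(bars, title="demo") # bars: OHLC data fetched elsewhere cs.add_indicator("bbands", win=20) # per the source, this removes the moving-average traces and draws BBANDS cs.plot()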
main_traces [ trace_name ] = line","title":"add_line()"},{"location":"api/plotting/candlestick/#omicron.plotting.candlestick.Candlestick.add_main_trace","text":"add trace to main plot \u652f\u6301\u7684\u56fe\u4f8b\u7c7b\u522b\u6709peaks, bbox\uff08bounding-box), bt(\u56de\u6d4b), support_line, resist_line Parameters: Name Type Description Default trace_name \u56fe\u4f8b\u540d\u79f0 required **kwargs \u5176\u4ed6\u53c2\u6570 {} Source code in omicron/plotting/candlestick.py def add_main_trace ( self , trace_name : str , ** kwargs ): \"\"\"add trace to main plot \u652f\u6301\u7684\u56fe\u4f8b\u7c7b\u522b\u6709peaks, bbox\uff08bounding-box), bt(\u56de\u6d4b), support_line, resist_line Args: trace_name : \u56fe\u4f8b\u540d\u79f0 **kwargs : \u5176\u4ed6\u53c2\u6570 \"\"\" if trace_name == \"peaks\" : self . mark_peaks_and_valleys ( kwargs . get ( \"up_thres\" , 0.03 ), kwargs . get ( \"down_thres\" , - 0.03 ) ) # \u6807\u6ce8\u77e9\u5f62\u6846 elif trace_name == \"bbox\" : self . add_bounding_box ( kwargs . get ( \"boxes\" )) # \u56de\u6d4b\u7ed3\u679c elif trace_name == \"bt\" : self . add_backtest_result ( kwargs . get ( \"bt\" )) # \u589e\u52a0\u76f4\u7ebf elif trace_name == \"support_line\" : self . add_line ( \"\u652f\u6491\u7ebf\" , kwargs . get ( \"x\" ), kwargs . get ( \"y\" )) elif trace_name == \"resist_line\" : self . add_line ( \"\u538b\u529b\u7ebf\" , kwargs . get ( \"x\" ), kwargs . get ( \"y\" ))","title":"add_main_trace()"},{"location":"api/plotting/candlestick/#omicron.plotting.candlestick.Candlestick.add_marks","text":"\u5411k\u7ebf\u56fe\u4e2d\u589e\u52a0\u6807\u8bb0\u70b9 Source code in omicron/plotting/candlestick.py def add_marks ( self , x : List [ int ], y : List [ float ], name : str , marker : str = \"cross\" , color : Optional [ str ] = None , ): \"\"\"\u5411k\u7ebf\u56fe\u4e2d\u589e\u52a0\u6807\u8bb0\u70b9\"\"\" trace = go . Scatter ( x = self . ticks [ x ], y = y , mode = \"markers\" , marker_symbol = marker , marker_color = color , name = name , ) self . main_traces [ name ] = trace","title":"add_marks()"},{"location":"api/plotting/candlestick/#omicron.plotting.candlestick.Candlestick.mark_backtest_result","text":"\u6807\u8bb0\u4e70\u5356\u70b9\u548c\u56de\u6d4b\u6570\u636e Todo \u6b64\u65b9\u6cd5\u53ef\u80fd\u672a\u4e0ebacktest\u8fd4\u56de\u503c\u540c\u6b65\u3002\u6b64\u5916\uff0c\u5728portofolio\u56de\u6d4b\u4e2d\uff0c\u4e0d\u53ef\u80fd\u5728k\u7ebf\u56fe\u4e2d\u4f7f\u7528\u6b64\u65b9\u6cd5\u3002 Parameters: Name Type Description Default points \u4e70\u5356\u70b9\u7684\u5750\u6807\u3002 required Source code in omicron/plotting/candlestick.py def mark_backtest_result ( self , result : dict ): \"\"\"\u6807\u8bb0\u4e70\u5356\u70b9\u548c\u56de\u6d4b\u6570\u636e TODO: \u6b64\u65b9\u6cd5\u53ef\u80fd\u672a\u4e0ebacktest\u8fd4\u56de\u503c\u540c\u6b65\u3002\u6b64\u5916\uff0c\u5728portofolio\u56de\u6d4b\u4e2d\uff0c\u4e0d\u53ef\u80fd\u5728k\u7ebf\u56fe\u4e2d\u4f7f\u7528\u6b64\u65b9\u6cd5\u3002 Args: points : \u4e70\u5356\u70b9\u7684\u5750\u6807\u3002 \"\"\" trades = result . get ( \"trades\" ) assets = result . get ( \"assets\" ) x , y , labels = [], [], [] hover = [] labels_color = defaultdict ( list ) for trade in trades : trade_date = arrow . get ( trade [ \"time\" ]) . date () asset = assets . get ( trade_date ) security = trade [ \"security\" ] price = trade [ \"price\" ] volume = trade [ \"volume\" ] side = trade [ \"order_side\" ] x . append ( self . _format_tick ( trade_date )) bar = self . bars [ self . 
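add_marks takes bar indices for x and price levels for y; a sketch with arbitrarily chosen indices and offsets: cs = Candlestick(bars, title="demo") idx = [10, 30] # positions of the bars to annotate y = [bars["high"][i] * 1.02 for i in idx] # place markers slightly above the highs cs.add_marks(idx, y, name="signal", marker="cross") cs.plot()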
bars [ \"frame\" ] == trade_date ] if side == \"\u4e70\u5165\" : hover . append ( f \"\u603b\u8d44\u4ea7: { asset }

{ side } : { security }
\u4e70\u5165\u4ef7: { price }
\u80a1\u6570: { volume } \" ) y . append ( bar [ \"high\" ][ 0 ] * 1.1 ) labels . append ( \"B\" ) labels_color [ \"color\" ] . append ( self . RED ) else : y . append ( bar [ \"low\" ][ 0 ] * 0.99 ) hover . append ( f \"\u603b\u8d44\u4ea7: { asset }

{ side } : { security }
\u5356\u51fa\u4ef7: { price }
\u80a1\u6570: { volume } \" ) labels . append ( \"S\" ) labels_color [ \"color\" ] . append ( self . GREEN ) labels_color . append ( self . GREEN ) # txt.append(f'{side}:{security}
\u5356\u51fa\u4ef7:{price}
\u80a1\u6570:{volume}') trace = go . Scatter ( x = x , y = y , mode = \"text\" , text = labels , name = \"backtest\" , hovertext = hover , textfont = labels_color , ) self . main_traces [ \"bs\" ] = trace","title":"mark_backtest_result()"},{"location":"api/plotting/candlestick/#omicron.plotting.candlestick.Candlestick.mark_bbox","text":"\u5728k\u7ebf\u56fe\u4e0a\u68c0\u6d4b\u5e76\u6807\u6ce8\u77e9\u5f62\u6846 Parameters: Name Type Description Default min_size \u77e9\u5f62\u6846\u7684\u6700\u5c0f\u957f\u5ea6 20 Source code in omicron/plotting/candlestick.py def mark_bbox ( self , min_size : int = 20 ): \"\"\"\u5728k\u7ebf\u56fe\u4e0a\u68c0\u6d4b\u5e76\u6807\u6ce8\u77e9\u5f62\u6846 Args: min_size : \u77e9\u5f62\u6846\u7684\u6700\u5c0f\u957f\u5ea6 \"\"\" boxes = plateaus ( self . _bar_close , min_size ) self . add_main_trace ( \"bbox\" , boxes = boxes )","title":"mark_bbox()"},{"location":"api/plotting/candlestick/#omicron.plotting.candlestick.Candlestick.mark_peaks_and_valleys","text":"\u5728K\u7ebf\u56fe\u4e0a\u6807\u6ce8\u5cf0\u8c37\u70b9 Parameters: Name Type Description Default up_thres \u7528\u6765\u68c0\u6d4b\u5cf0\u8c37\u65f6\u4f7f\u7528\u7684\u9608\u503c\uff0c\u53c2\u89c1 omicron.talib.morph.peaks_and_valleys None down_thres \u7528\u6765\u68c0\u6d4b\u5cf0\u8c37\u65f6\u4f7f\u7528\u7684\u9608\u503c\uff0c\u53c2\u89c1 omicron.talib.morph.peaks_and_valleys None Source code in omicron/plotting/candlestick.py def mark_peaks_and_valleys ( self , up_thres : Optional [ float ] = None , down_thres : Optional [ float ] = None ): \"\"\"\u5728K\u7ebf\u56fe\u4e0a\u6807\u6ce8\u5cf0\u8c37\u70b9 Args: up_thres : \u7528\u6765\u68c0\u6d4b\u5cf0\u8c37\u65f6\u4f7f\u7528\u7684\u9608\u503c\uff0c\u53c2\u89c1[omicron.talib.morph.peaks_and_valleys][] down_thres : \u7528\u6765\u68c0\u6d4b\u5cf0\u8c37\u65f6\u4f7f\u7528\u7684\u9608\u503c\uff0c\u53c2\u89c1[omicron.talib.morph.peaks_and_valleys][] \"\"\" bars = self . bars flags = peaks_and_valleys ( self . _bar_close , up_thres , down_thres ) # \u79fb\u9664\u9996\u5c3e\u7684\u9876\u5e95\u6807\u8bb0\uff0c\u4e00\u822c\u60c5\u51b5\u4e0b\u5b83\u4eec\u90fd\u4e0d\u662f\u771f\u6b63\u7684\u9876\u548c\u5e95\u3002 flags [ 0 ] = 0 flags [ - 1 ] = 0 marker_margin = ( max ( bars [ \"high\" ]) - min ( bars [ \"low\" ])) * 0.05 ticks_up = self . ticks [ flags == 1 ] y_up = bars [ \"high\" ][ flags == 1 ] + marker_margin ticks_down = self . ticks [ flags == - 1 ] y_down = bars [ \"low\" ][ flags == - 1 ] - marker_margin trace = go . Scatter ( mode = \"markers\" , x = ticks_up , y = y_up , marker_symbol = \"triangle-down\" , name = \"\u5cf0\" ) self . main_traces [ \"peaks\" ] = trace trace = go . Scatter ( mode = \"markers\" , x = ticks_down , y = y_down , marker_symbol = \"triangle-up\" , name = \"\u8c37\" , ) self . 
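A sketch of marking consolidation boxes of at least 30 bars (the threshold is an arbitrary choice): cs = Candlestick(bars, title="demo") # bars: OHLC data fetched elsewhere cs.mark_bbox(min_size=30) # detect plateaus on the close series and draw bounding boxes cs.plot()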
main_traces [ \"valleys\" ] = trace","title":"mark_peaks_and_valleys()"},{"location":"api/plotting/candlestick/#omicron.plotting.candlestick.Candlestick.mark_support_resist_lines","text":"\u5728K\u7ebf\u56fe\u4e0a\u6807\u6ce8\u652f\u6491\u7ebf\u548c\u538b\u529b\u7ebf \u5728 win \u4e2ak\u7ebf\u5185\uff0c\u627e\u51fa\u6240\u6709\u7684\u5c40\u90e8\u5cf0\u8c37\u70b9\uff0c\u5e76\u4ee5\u6700\u9ad8\u7684\u4e24\u4e2a\u5cf0\u8fde\u7ebf\u751f\u6210\u538b\u529b\u7ebf\uff0c\u4ee5\u6700\u4f4e\u7684\u4e24\u4e2a\u8c37\u8fde\u7ebf\u751f\u6210\u652f\u6491\u7ebf\u3002 Parameters: Name Type Description Default upthres \u7528\u6765\u68c0\u6d4b\u5cf0\u8c37\u65f6\u4f7f\u7528\u7684\u9608\u503c\uff0c\u53c2\u89c1 omicron.talib.morph.peaks_and_valleys None downthres \u7528\u6765\u68c0\u6d4b\u5cf0\u8c37\u65f6\u4f7f\u7528\u7684\u9608\u503c\uff0c\u53c2\u89c1 omicron.talib.morph.peaks_and_valleys . None use_close \u662f\u5426\u4f7f\u7528\u6536\u76d8\u4ef7\u6765\u8fdb\u884c\u68c0\u6d4b\u3002\u5982\u679c\u4e3aFalse\uff0c\u5219\u4f7f\u7528high\u6765\u68c0\u6d4b\u538b\u529b\u7ebf\uff0c\u4f7f\u7528low\u6765\u68c0\u6d4b\u652f\u6491\u7ebf. True win \u68c0\u6d4b\u5c40\u90e8\u9ad8\u4f4e\u70b9\u7684\u7a97\u53e3. 60 Source code in omicron/plotting/candlestick.py def mark_support_resist_lines ( self , upthres : float = None , downthres : float = None , use_close = True , win = 60 ): \"\"\"\u5728K\u7ebf\u56fe\u4e0a\u6807\u6ce8\u652f\u6491\u7ebf\u548c\u538b\u529b\u7ebf \u5728`win`\u4e2ak\u7ebf\u5185\uff0c\u627e\u51fa\u6240\u6709\u7684\u5c40\u90e8\u5cf0\u8c37\u70b9\uff0c\u5e76\u4ee5\u6700\u9ad8\u7684\u4e24\u4e2a\u5cf0\u8fde\u7ebf\u751f\u6210\u538b\u529b\u7ebf\uff0c\u4ee5\u6700\u4f4e\u7684\u4e24\u4e2a\u8c37\u8fde\u7ebf\u751f\u6210\u652f\u6491\u7ebf\u3002 Args: upthres : \u7528\u6765\u68c0\u6d4b\u5cf0\u8c37\u65f6\u4f7f\u7528\u7684\u9608\u503c\uff0c\u53c2\u89c1`omicron.talib.morph.peaks_and_valleys` downthres : \u7528\u6765\u68c0\u6d4b\u5cf0\u8c37\u65f6\u4f7f\u7528\u7684\u9608\u503c\uff0c\u53c2\u89c1`omicron.talib.morph.peaks_and_valleys`. use_close : \u662f\u5426\u4f7f\u7528\u6536\u76d8\u4ef7\u6765\u8fdb\u884c\u68c0\u6d4b\u3002\u5982\u679c\u4e3aFalse\uff0c\u5219\u4f7f\u7528high\u6765\u68c0\u6d4b\u538b\u529b\u7ebf\uff0c\u4f7f\u7528low\u6765\u68c0\u6d4b\u652f\u6491\u7ebf. win : \u68c0\u6d4b\u5c40\u90e8\u9ad8\u4f4e\u70b9\u7684\u7a97\u53e3. \"\"\" bars = self . bars [ - win :] clipped = len ( self . bars ) - win if use_close : support , resist , x_start = support_resist_lines ( self . _bar_close , upthres , downthres ) x = np . arange ( len ( bars ))[ x_start :] self . add_main_trace ( \"support_line\" , x = x + clipped , y = support ( x )) self . add_main_trace ( \"resist_line\" , x = x + clipped , y = resist ( x )) else : # \u4f7f\u7528\"high\"\u548c\"low\" bars = self . bars [ - win :] support , _ , x_start = support_resist_lines ( bars [ \"low\" ], upthres , downthres ) x = np . arange ( len ( bars ))[ x_start :] self . add_main_trace ( \"support_line\" , x = x + clipped , y = support ( x )) _ , resist , x_start = support_resist_lines ( bars [ \"high\" ], upthres , downthres ) x = np . arange ( len ( bars ))[ x_start :] self . add_main_trace ( \"resist_line\" , x = x + clipped , y = resist ( x ))","title":"mark_support_resist_lines()"},{"location":"api/plotting/candlestick/#omicron.plotting.candlestick.Candlestick.plot","text":"\u7ed8\u5236\u56fe\u8868 Source code in omicron/plotting/candlestick.py def plot ( self ): \"\"\"\u7ed8\u5236\u56fe\u8868\"\"\" fig = self . figure fig . 
show ()","title":"plot()"},{"location":"api/plotting/metrics/","text":"\u7ed8\u5236\u56de\u6d4b\u8d44\u4ea7\u66f2\u7ebf\u548c\u6307\u6807\u56fe\u3002 \u793a\u4f8b: 1 2 3 4 5 6 from omicron.plotting import MetricsGraph # calling some strategy's backtest and get bills/metrics mg = MetricsGraph ( bills , metrics ) await mg . plot () \u6ce8\u610f\u6b64\u65b9\u6cd5\u9700\u8981\u5728notebook\u4e2d\u8c03\u7528\u3002 MetricsGraph \u00b6 Source code in omicron/plotting/metrics.py class MetricsGraph : def __init__ ( self , bills : dict , metrics : dict , baseline_code : str = \"399300.XSHE\" , indicator : Optional [ pd . DataFrame ] = None , ): \"\"\" Args: bills: \u56de\u6d4b\u751f\u6210\u7684\u8d26\u5355\uff0c\u901a\u8fc7Strategy.bills\u83b7\u5f97 metrics: \u56de\u6d4b\u751f\u6210\u7684\u6307\u6807\uff0c\u901a\u8fc7strategy.metrics\u83b7\u5f97 baseline_code: \u57fa\u51c6\u8bc1\u5238\u4ee3\u7801 indicator: \u56de\u6d4b\u65f6\u4f7f\u7528\u7684\u6307\u6807\u3002\u5982\u679c\u5b58\u5728\uff0c\u5c06\u53e0\u52a0\u5230\u7b56\u7565\u56de\u6d4b\u56fe\u4e0a\u3002\u5b83\u5e94\u8be5\u662f\u4e00\u4e2a\u4ee5\u65e5\u671f\u4e3a\u7d22\u5f15\uff0c\u6307\u6807\u503c\u5217\u540d\u4e3a\"value\"\u7684pandas.DataFrame\u3002\u5982\u679c\u4e0d\u63d0\u4f9b\uff0c\u5c06\u4e0d\u4f1a\u7ed8\u5236\u6307\u6807\u56fe \"\"\" self . metrics = metrics self . trades = bills [ \"trades\" ] self . positions = bills [ \"positions\" ] self . start = arrow . get ( bills [ \"assets\" ][ 0 ][ 0 ]) . date () self . end = arrow . get ( bills [ \"assets\" ][ - 1 ][ 0 ]) . date () self . frames = [ tf . int2date ( f ) for f in tf . get_frames ( self . start , self . end , FrameType . DAY ) ] if indicator is not None : self . indicator = indicator . join ( pd . Series ( index = self . frames , name = \"frames\" ), how = \"right\" ) else : self . indicator = None # \u8bb0\u5f55\u65e5\u671f\u5230\u4e0b\u6807\u7684\u53cd\u5411\u6620\u5c04 self . _frame2pos = { f : i for i , f in enumerate ( self . frames )} self . ticks = self . _format_tick ( self . frames ) # TODO: there's bug in backtesting, temporarily fix here df = pd . DataFrame ( self . frames , columns = [ \"frame\" ]) df [ \"assets\" ] = np . nan assets = pd . DataFrame ( bills [ \"assets\" ], columns = [ \"frame\" , \"assets\" ]) df [ \"assets\" ] = assets [ \"assets\" ] self . assets = df . fillna ( method = \"ffill\" )[ \"assets\" ] . to_numpy () self . nv = self . assets / self . assets [ 0 ] self . baseline_code = baseline_code or \"399300.XSHE\" def _fill_missing_prices ( self , bars : BarsArray , frames : Union [ List , NDArray ]): \"\"\"\u5c06bars\u4e2d\u7f3a\u5931\u503c\u91c7\u7528\u5176\u524d\u503c\u66ff\u6362 \u5f53baseline\u4e3a\u4e2a\u80a1\u65f6\uff0c\u53ef\u80fd\u5b58\u5728\u505c\u724c\u7684\u60c5\u51b5\uff0c\u8fd9\u6837\u5bfc\u81f4\u7531\u6b64\u8ba1\u7b97\u7684\u53c2\u8003\u6536\u76ca\u65e0\u6cd5\u4e0e\u56de\u6d4b\u7684\u8d44\u4ea7\u6536\u76ca\u5bf9\u9f50\uff0c\u56e0\u6b64\u9700\u8981\u8fdb\u884c\u8c03\u6574\u3002 \u51fa\u4e8e\u8fd9\u4e2a\u76ee\u7684\uff0c\u672c\u51fd\u6570\u53ea\u8fd4\u56de\u5904\u7406\u540e\u7684\u6536\u76d8\u4ef7\u3002 Args: bars: \u57fa\u7ebf\u884c\u60c5\u6570\u636e\u3002 frames: \u65e5\u671f\u7d22\u5f15 Returns: \u8865\u5145\u7f3a\u5931\u503c\u540e\u7684\u6536\u76d8\u4ef7\u5e8f\u5217 \"\"\" _close = pd . DataFrame ( { \"close\" : pd . Series ( bars [ \"close\" ], index = bars [ \"frame\" ]), \"frame\" : pd . Series ( np . empty (( len ( frames ),)), index = frames ), } )[ \"close\" ] . 
to_numpy () # \u8fd9\u91cc\u4f7f\u7528omicron\u4e2d\u7684fill_nan\uff0c\u662f\u56e0\u4e3a\u5982\u679c\u6570\u7ec4\u7684\u7b2c\u4e00\u4e2a\u5143\u7d20\u5373\u4e3aNaN\u7684\u8bdd\uff0c\u90a3\u4e48DataFrame.fillna(method='ffill')\u5c06\u65e0\u6cd5\u5904\u7406\u8fd9\u6837\u7684\u60c5\u51b5(\u4ecd\u7136\u4fdd\u6301\u4e3anan) return fill_nan ( _close ) def _format_tick ( self , frames : Union [ Frame , List [ Frame ]]) -> Union [ str , NDArray ]: if type ( frames ) == datetime . date : x = frames return f \" { x . year : 02 } - { x . month : 02 } - { x . day : 02 } \" elif type ( frames ) == datetime . datetime : x = frames return f \" { x . month : 02 } - { x . day : 02 } { x . hour : 02 } : { x . minute : 02 } \" elif type ( frames [ 0 ]) == datetime . date : # type: ignore return np . array ([ f \" { x . year : 02 } - { x . month : 02 } - { x . day : 02 } \" for x in frames ]) else : return np . array ( [ f \" { x . month : 02 } - { x . day : 02 } { x . hour : 02 } : { x . minute : 02 } \" for x in frames ] # type: ignore ) async def _metrics_trace ( self ): metric_names = { \"start\" : \"\u8d77\u59cb\u65e5\" , \"end\" : \"\u7ed3\u675f\u65e5\" , \"window\" : \"\u8d44\u4ea7\u66b4\u9732\u7a97\u53e3\" , \"total_tx\" : \"\u4ea4\u6613\u6b21\u6570\" , \"total_profit\" : \"\u603b\u5229\u6da6\" , \"total_profit_rate\" : \"\u5229\u6da6\u7387\" , \"win_rate\" : \"\u80dc\u7387\" , \"mean_return\" : \"\u65e5\u5747\u56de\u62a5\" , \"sharpe\" : \"\u590f\u666e\u7387\" , \"max_drawdown\" : \"\u6700\u5927\u56de\u64a4\" , \"annual_return\" : \"\u5e74\u5316\u56de\u62a5\" , \"volatility\" : \"\u6ce2\u52a8\u7387\" , \"sortino\" : \"sortino\" , \"calmar\" : \"calmar\" , } # bug: plotly go.Table.Cells format not work here metric_formatter = { \"start\" : \" {} \" , \"end\" : \" {} \" , \"window\" : \" {} \" , \"total_tx\" : \" {} \" , \"total_profit\" : \" {:.2f} \" , \"total_profit_rate\" : \" {:.2%} \" , \"win_rate\" : \" {:.2%} \" , \"mean_return\" : \" {:.2%} \" , \"sharpe\" : \" {:.2f} \" , \"max_drawdown\" : \" {:.2%} \" , \"annual_return\" : \" {:.2%} \" , \"volatility\" : \" {:.2%} \" , \"sortino\" : \" {:.2f} \" , \"calmar\" : \" {:.2f} \" , } metrics = deepcopy ( self . metrics ) baseline = metrics [ \"baseline\" ] or {} del metrics [ \"baseline\" ] baseline_name = ( await Security . alias ( self . baseline_code ) if self . baseline_code else \"\u57fa\u51c6\" ) metrics_formatted = [] for k in metric_names . keys (): if metrics . get ( k ): metrics_formatted . append ( metric_formatter [ k ] . format ( metrics . get ( k ))) else : metrics_formatted . append ( \"-\" ) baseline_formatted = [] for k in metric_names . keys (): if baseline . get ( k ): baseline_formatted . append ( metric_formatter [ k ] . format ( baseline . get ( k ))) else : baseline_formatted . append ( \"-\" ) return go . Table ( header = dict ( values = [ \"\u6307\u6807\u540d\" , \"\u7b56\u7565\" , baseline_name ]), cells = dict ( values = [ [ v for _ , v in metric_names . items ()], metrics_formatted , baseline_formatted , ], font_size = 10 , ), ) async def _trade_info_trace ( self ): \"\"\"\u6784\u5efahover text \u5e8f\u5217\"\"\" # convert trades into hover_info buys = defaultdict ( list ) sells = defaultdict ( list ) for _ , trade in self . trades . items (): trade_date = arrow . get ( trade [ \"time\" ]) . date () ipos = self . _frame2pos . get ( trade_date ) if ipos is None : logger . warning ( \"date %s in trade record not in backtest range\" , trade_date ) continue name = await Security . 
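A sketch of overlaying a custom indicator on the backtest report, assuming `bills` and `metrics` come from a finished strategy backtest and that `my_values` and `trading_days` are placeholder names for indicator readings aligned with the trading days: import pandas as pd from omicron.plotting import MetricsGraph # the indicator must be indexed by date and expose its readings in a "value" column ind = pd.DataFrame({"value": my_values}, index=trading_days) mg = MetricsGraph(bills, metrics, baseline_code="399300.XSHE", indicator=ind) await mg.plot()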
alias ( trade [ \"security\" ]) price = trade [ \"price\" ] side = trade [ \"order_side\" ] filled = trade [ \"filled\" ] trade_text = f \" { side } : { name } { filled / 100 : .0f } \u624b \u4ef7\u683c: { price : .02f } \u6210\u4ea4\u989d: { filled * price / 10000 : .1f } \u4e07\" if side == \"\u5356\u51fa\" : sells [ trade_date ] . append ( trade_text ) elif side in ( \"\u4e70\u5165\" , \"\u5206\u7ea2\u914d\u80a1\" ): buys [ trade_date ] . append ( trade_text ) X_buy , Y_buy , data_buy = [], [], [] X_sell , Y_sell , data_sell = [], [], [] for dt , text in buys . items (): ipos = self . _frame2pos . get ( dt ) Y_buy . append ( self . nv [ ipos ]) X_buy . append ( self . _format_tick ( dt )) asset = self . assets [ ipos ] hover = f \"\u8d44\u4ea7: { asset / 10000 : .1f } \u4e07
{ '
' . join ( text ) } \" data_buy . append ( hover ) trace_buy = go . Scatter ( x = X_buy , y = Y_buy , mode = \"markers\" , text = data_buy , name = \"\u4e70\u5165\u6210\u4ea4\" , marker = dict ( color = \"red\" , symbol = \"triangle-up\" ), hovertemplate = \"
% {text} \" , ) for dt , text in sells . items (): ipos = self . _frame2pos . get ( dt ) Y_sell . append ( self . nv [ ipos ]) X_sell . append ( self . _format_tick ( dt )) asset = self . assets [ ipos ] hover = f \"\u8d44\u4ea7: { asset / 10000 : .1f } \u4e07
{ '
' . join ( text ) } \" data_sell . append ( hover ) trace_sell = go . Scatter ( x = X_sell , y = Y_sell , mode = \"markers\" , text = data_sell , name = \"\u5356\u51fa\u6210\u4ea4\" , marker = dict ( color = \"green\" , symbol = \"triangle-down\" ), hovertemplate = \"
% {text} \" , ) return trace_buy , trace_sell async def plot ( self ): \"\"\"\u7ed8\u5236\u8d44\u4ea7\u66f2\u7ebf\u53ca\u56de\u6d4b\u6307\u6807\u56fe\"\"\" n = len ( self . assets ) bars = await Stock . get_bars ( self . baseline_code , n , FrameType . DAY , self . end ) baseline_prices = self . _fill_missing_prices ( bars , self . frames ) baseline_prices /= baseline_prices [ 0 ] fig = make_subplots ( rows = 1 , cols = 2 , shared_xaxes = False , specs = [ [{ \"secondary_y\" : True }, { \"type\" : \"table\" }], ], column_width = [ 0.75 , 0.25 ], horizontal_spacing = 0.01 , subplot_titles = ( \"\u8d44\u4ea7\u66f2\u7ebf\" , \"\u7b56\u7565\u6307\u6807\" ), ) fig . add_trace ( await self . _metrics_trace (), row = 1 , col = 2 ) if self . indicator is not None : indicator_on_hover = self . indicator [ \"value\" ] else : indicator_on_hover = None baseline_name = ( await Security . alias ( self . baseline_code ) if self . baseline_code else \"\u57fa\u51c6\" ) baseline_trace = go . Scatter ( y = baseline_prices , x = self . ticks , mode = \"lines\" , name = baseline_name , showlegend = True , text = indicator_on_hover , hovertemplate = \"
\u51c0\u503c:% {y:.2f} \" + \"
\u6307\u6807:% {text:.1f} \" , ) fig . add_trace ( baseline_trace , row = 1 , col = 1 ) nv_trace = go . Scatter ( y = self . nv , x = self . ticks , mode = \"lines\" , name = \"\u7b56\u7565\" , showlegend = True , hovertemplate = \"
\u51c0\u503c:% {y:.2f} \" , ) fig . add_trace ( nv_trace , row = 1 , col = 1 ) if self . indicator is not None : ind_trace = go . Scatter ( y = self . indicator [ \"value\" ], x = self . ticks , mode = \"lines\" , name = \"indicator\" , showlegend = True , visible = \"legendonly\" , ) fig . add_trace ( ind_trace , row = 1 , col = 1 , secondary_y = True ) for trace in await self . _trade_info_trace (): fig . add_trace ( trace , row = 1 , col = 1 ) fig . update_xaxes ( type = \"category\" , tickangle = 45 , nticks = len ( self . ticks ) // 5 ) fig . update_layout ( margin = dict ( l = 20 , r = 20 , t = 50 , b = 50 ), width = 1040 , height = 435 ) fig . update_layout ( hovermode = \"x unified\" , hoverlabel = dict ( bgcolor = \"rgba(255,255,255,0.8)\" ) ) fig . show () __init__ ( self , bills , metrics , baseline_code = '399300.XSHE' , indicator = None ) special \u00b6 Parameters: Name Type Description Default bills dict \u56de\u6d4b\u751f\u6210\u7684\u8d26\u5355\uff0c\u901a\u8fc7Strategy.bills\u83b7\u5f97 required metrics dict \u56de\u6d4b\u751f\u6210\u7684\u6307\u6807\uff0c\u901a\u8fc7strategy.metrics\u83b7\u5f97 required baseline_code str \u57fa\u51c6\u8bc1\u5238\u4ee3\u7801 '399300.XSHE' indicator Optional[pandas.core.frame.DataFrame] \u56de\u6d4b\u65f6\u4f7f\u7528\u7684\u6307\u6807\u3002\u5982\u679c\u5b58\u5728\uff0c\u5c06\u53e0\u52a0\u5230\u7b56\u7565\u56de\u6d4b\u56fe\u4e0a\u3002\u5b83\u5e94\u8be5\u662f\u4e00\u4e2a\u4ee5\u65e5\u671f\u4e3a\u7d22\u5f15\uff0c\u6307\u6807\u503c\u5217\u540d\u4e3a\"value\"\u7684pandas.DataFrame\u3002\u5982\u679c\u4e0d\u63d0\u4f9b\uff0c\u5c06\u4e0d\u4f1a\u7ed8\u5236\u6307\u6807\u56fe None Source code in omicron/plotting/metrics.py def __init__ ( self , bills : dict , metrics : dict , baseline_code : str = \"399300.XSHE\" , indicator : Optional [ pd . DataFrame ] = None , ): \"\"\" Args: bills: \u56de\u6d4b\u751f\u6210\u7684\u8d26\u5355\uff0c\u901a\u8fc7Strategy.bills\u83b7\u5f97 metrics: \u56de\u6d4b\u751f\u6210\u7684\u6307\u6807\uff0c\u901a\u8fc7strategy.metrics\u83b7\u5f97 baseline_code: \u57fa\u51c6\u8bc1\u5238\u4ee3\u7801 indicator: \u56de\u6d4b\u65f6\u4f7f\u7528\u7684\u6307\u6807\u3002\u5982\u679c\u5b58\u5728\uff0c\u5c06\u53e0\u52a0\u5230\u7b56\u7565\u56de\u6d4b\u56fe\u4e0a\u3002\u5b83\u5e94\u8be5\u662f\u4e00\u4e2a\u4ee5\u65e5\u671f\u4e3a\u7d22\u5f15\uff0c\u6307\u6807\u503c\u5217\u540d\u4e3a\"value\"\u7684pandas.DataFrame\u3002\u5982\u679c\u4e0d\u63d0\u4f9b\uff0c\u5c06\u4e0d\u4f1a\u7ed8\u5236\u6307\u6807\u56fe \"\"\" self . metrics = metrics self . trades = bills [ \"trades\" ] self . positions = bills [ \"positions\" ] self . start = arrow . get ( bills [ \"assets\" ][ 0 ][ 0 ]) . date () self . end = arrow . get ( bills [ \"assets\" ][ - 1 ][ 0 ]) . date () self . frames = [ tf . int2date ( f ) for f in tf . get_frames ( self . start , self . end , FrameType . DAY ) ] if indicator is not None : self . indicator = indicator . join ( pd . Series ( index = self . frames , name = \"frames\" ), how = \"right\" ) else : self . indicator = None # \u8bb0\u5f55\u65e5\u671f\u5230\u4e0b\u6807\u7684\u53cd\u5411\u6620\u5c04 self . _frame2pos = { f : i for i , f in enumerate ( self . frames )} self . ticks = self . _format_tick ( self . frames ) # TODO: there's bug in backtesting, temporarily fix here df = pd . DataFrame ( self . frames , columns = [ \"frame\" ]) df [ \"assets\" ] = np . nan assets = pd . DataFrame ( bills [ \"assets\" ], columns = [ \"frame\" , \"assets\" ]) df [ \"assets\" ] = assets [ \"assets\" ] self . assets = df . 
fillna ( method = \"ffill\" )[ \"assets\" ] . to_numpy () self . nv = self . assets / self . assets [ 0 ] self . baseline_code = baseline_code or \"399300.XSHE\" plot ( self ) async \u00b6 \u7ed8\u5236\u8d44\u4ea7\u66f2\u7ebf\u53ca\u56de\u6d4b\u6307\u6807\u56fe Source code in omicron/plotting/metrics.py async def plot ( self ): \"\"\"\u7ed8\u5236\u8d44\u4ea7\u66f2\u7ebf\u53ca\u56de\u6d4b\u6307\u6807\u56fe\"\"\" n = len ( self . assets ) bars = await Stock . get_bars ( self . baseline_code , n , FrameType . DAY , self . end ) baseline_prices = self . _fill_missing_prices ( bars , self . frames ) baseline_prices /= baseline_prices [ 0 ] fig = make_subplots ( rows = 1 , cols = 2 , shared_xaxes = False , specs = [ [{ \"secondary_y\" : True }, { \"type\" : \"table\" }], ], column_width = [ 0.75 , 0.25 ], horizontal_spacing = 0.01 , subplot_titles = ( \"\u8d44\u4ea7\u66f2\u7ebf\" , \"\u7b56\u7565\u6307\u6807\" ), ) fig . add_trace ( await self . _metrics_trace (), row = 1 , col = 2 ) if self . indicator is not None : indicator_on_hover = self . indicator [ \"value\" ] else : indicator_on_hover = None baseline_name = ( await Security . alias ( self . baseline_code ) if self . baseline_code else \"\u57fa\u51c6\" ) baseline_trace = go . Scatter ( y = baseline_prices , x = self . ticks , mode = \"lines\" , name = baseline_name , showlegend = True , text = indicator_on_hover , hovertemplate = \"
\u51c0\u503c:% {y:.2f} \" + \"
\u6307\u6807:% {text:.1f} \" , ) fig . add_trace ( baseline_trace , row = 1 , col = 1 ) nv_trace = go . Scatter ( y = self . nv , x = self . ticks , mode = \"lines\" , name = \"\u7b56\u7565\" , showlegend = True , hovertemplate = \"
\u51c0\u503c:% {y:.2f} \" , ) fig . add_trace ( nv_trace , row = 1 , col = 1 ) if self . indicator is not None : ind_trace = go . Scatter ( y = self . indicator [ \"value\" ], x = self . ticks , mode = \"lines\" , name = \"indicator\" , showlegend = True , visible = \"legendonly\" , ) fig . add_trace ( ind_trace , row = 1 , col = 1 , secondary_y = True ) for trace in await self . _trade_info_trace (): fig . add_trace ( trace , row = 1 , col = 1 ) fig . update_xaxes ( type = \"category\" , tickangle = 45 , nticks = len ( self . ticks ) // 5 ) fig . update_layout ( margin = dict ( l = 20 , r = 20 , t = 50 , b = 50 ), width = 1040 , height = 435 ) fig . update_layout ( hovermode = \"x unified\" , hoverlabel = dict ( bgcolor = \"rgba(255,255,255,0.8)\" ) ) fig . show ()","title":"MetricsGraph"},{"location":"api/plotting/metrics/#omicron.plotting.metrics.MetricsGraph","text":"Source code in omicron/plotting/metrics.py class MetricsGraph : def __init__ ( self , bills : dict , metrics : dict , baseline_code : str = \"399300.XSHE\" , indicator : Optional [ pd . DataFrame ] = None , ): \"\"\" Args: bills: \u56de\u6d4b\u751f\u6210\u7684\u8d26\u5355\uff0c\u901a\u8fc7Strategy.bills\u83b7\u5f97 metrics: \u56de\u6d4b\u751f\u6210\u7684\u6307\u6807\uff0c\u901a\u8fc7strategy.metrics\u83b7\u5f97 baseline_code: \u57fa\u51c6\u8bc1\u5238\u4ee3\u7801 indicator: \u56de\u6d4b\u65f6\u4f7f\u7528\u7684\u6307\u6807\u3002\u5982\u679c\u5b58\u5728\uff0c\u5c06\u53e0\u52a0\u5230\u7b56\u7565\u56de\u6d4b\u56fe\u4e0a\u3002\u5b83\u5e94\u8be5\u662f\u4e00\u4e2a\u4ee5\u65e5\u671f\u4e3a\u7d22\u5f15\uff0c\u6307\u6807\u503c\u5217\u540d\u4e3a\"value\"\u7684pandas.DataFrame\u3002\u5982\u679c\u4e0d\u63d0\u4f9b\uff0c\u5c06\u4e0d\u4f1a\u7ed8\u5236\u6307\u6807\u56fe \"\"\" self . metrics = metrics self . trades = bills [ \"trades\" ] self . positions = bills [ \"positions\" ] self . start = arrow . get ( bills [ \"assets\" ][ 0 ][ 0 ]) . date () self . end = arrow . get ( bills [ \"assets\" ][ - 1 ][ 0 ]) . date () self . frames = [ tf . int2date ( f ) for f in tf . get_frames ( self . start , self . end , FrameType . DAY ) ] if indicator is not None : self . indicator = indicator . join ( pd . Series ( index = self . frames , name = \"frames\" ), how = \"right\" ) else : self . indicator = None # \u8bb0\u5f55\u65e5\u671f\u5230\u4e0b\u6807\u7684\u53cd\u5411\u6620\u5c04 self . _frame2pos = { f : i for i , f in enumerate ( self . frames )} self . ticks = self . _format_tick ( self . frames ) # TODO: there's bug in backtesting, temporarily fix here df = pd . DataFrame ( self . frames , columns = [ \"frame\" ]) df [ \"assets\" ] = np . nan assets = pd . DataFrame ( bills [ \"assets\" ], columns = [ \"frame\" , \"assets\" ]) df [ \"assets\" ] = assets [ \"assets\" ] self . assets = df . fillna ( method = \"ffill\" )[ \"assets\" ] . to_numpy () self . nv = self . assets / self . assets [ 0 ] self . 
baseline_code = baseline_code or \"399300.XSHE\" def _fill_missing_prices ( self , bars : BarsArray , frames : Union [ List , NDArray ]): \"\"\"\u5c06bars\u4e2d\u7f3a\u5931\u503c\u91c7\u7528\u5176\u524d\u503c\u66ff\u6362 \u5f53baseline\u4e3a\u4e2a\u80a1\u65f6\uff0c\u53ef\u80fd\u5b58\u5728\u505c\u724c\u7684\u60c5\u51b5\uff0c\u8fd9\u6837\u5bfc\u81f4\u7531\u6b64\u8ba1\u7b97\u7684\u53c2\u8003\u6536\u76ca\u65e0\u6cd5\u4e0e\u56de\u6d4b\u7684\u8d44\u4ea7\u6536\u76ca\u5bf9\u9f50\uff0c\u56e0\u6b64\u9700\u8981\u8fdb\u884c\u8c03\u6574\u3002 \u51fa\u4e8e\u8fd9\u4e2a\u76ee\u7684\uff0c\u672c\u51fd\u6570\u53ea\u8fd4\u56de\u5904\u7406\u540e\u7684\u6536\u76d8\u4ef7\u3002 Args: bars: \u57fa\u7ebf\u884c\u60c5\u6570\u636e\u3002 frames: \u65e5\u671f\u7d22\u5f15 Returns: \u8865\u5145\u7f3a\u5931\u503c\u540e\u7684\u6536\u76d8\u4ef7\u5e8f\u5217 \"\"\" _close = pd . DataFrame ( { \"close\" : pd . Series ( bars [ \"close\" ], index = bars [ \"frame\" ]), \"frame\" : pd . Series ( np . empty (( len ( frames ),)), index = frames ), } )[ \"close\" ] . to_numpy () # \u8fd9\u91cc\u4f7f\u7528omicron\u4e2d\u7684fill_nan\uff0c\u662f\u56e0\u4e3a\u5982\u679c\u6570\u7ec4\u7684\u7b2c\u4e00\u4e2a\u5143\u7d20\u5373\u4e3aNaN\u7684\u8bdd\uff0c\u90a3\u4e48DataFrame.fillna(method='ffill')\u5c06\u65e0\u6cd5\u5904\u7406\u8fd9\u6837\u7684\u60c5\u51b5(\u4ecd\u7136\u4fdd\u6301\u4e3anan) return fill_nan ( _close ) def _format_tick ( self , frames : Union [ Frame , List [ Frame ]]) -> Union [ str , NDArray ]: if type ( frames ) == datetime . date : x = frames return f \" { x . year : 02 } - { x . month : 02 } - { x . day : 02 } \" elif type ( frames ) == datetime . datetime : x = frames return f \" { x . month : 02 } - { x . day : 02 } { x . hour : 02 } : { x . minute : 02 } \" elif type ( frames [ 0 ]) == datetime . date : # type: ignore return np . array ([ f \" { x . year : 02 } - { x . month : 02 } - { x . day : 02 } \" for x in frames ]) else : return np . array ( [ f \" { x . month : 02 } - { x . day : 02 } { x . hour : 02 } : { x . minute : 02 } \" for x in frames ] # type: ignore ) async def _metrics_trace ( self ): metric_names = { \"start\" : \"\u8d77\u59cb\u65e5\" , \"end\" : \"\u7ed3\u675f\u65e5\" , \"window\" : \"\u8d44\u4ea7\u66b4\u9732\u7a97\u53e3\" , \"total_tx\" : \"\u4ea4\u6613\u6b21\u6570\" , \"total_profit\" : \"\u603b\u5229\u6da6\" , \"total_profit_rate\" : \"\u5229\u6da6\u7387\" , \"win_rate\" : \"\u80dc\u7387\" , \"mean_return\" : \"\u65e5\u5747\u56de\u62a5\" , \"sharpe\" : \"\u590f\u666e\u7387\" , \"max_drawdown\" : \"\u6700\u5927\u56de\u64a4\" , \"annual_return\" : \"\u5e74\u5316\u56de\u62a5\" , \"volatility\" : \"\u6ce2\u52a8\u7387\" , \"sortino\" : \"sortino\" , \"calmar\" : \"calmar\" , } # bug: plotly go.Table.Cells format not work here metric_formatter = { \"start\" : \" {} \" , \"end\" : \" {} \" , \"window\" : \" {} \" , \"total_tx\" : \" {} \" , \"total_profit\" : \" {:.2f} \" , \"total_profit_rate\" : \" {:.2%} \" , \"win_rate\" : \" {:.2%} \" , \"mean_return\" : \" {:.2%} \" , \"sharpe\" : \" {:.2f} \" , \"max_drawdown\" : \" {:.2%} \" , \"annual_return\" : \" {:.2%} \" , \"volatility\" : \" {:.2%} \" , \"sortino\" : \" {:.2f} \" , \"calmar\" : \" {:.2f} \" , } metrics = deepcopy ( self . metrics ) baseline = metrics [ \"baseline\" ] or {} del metrics [ \"baseline\" ] baseline_name = ( await Security . alias ( self . baseline_code ) if self . baseline_code else \"\u57fa\u51c6\" ) metrics_formatted = [] for k in metric_names . keys (): if metrics . get ( k ): metrics_formatted . 
append ( metric_formatter [ k ] . format ( metrics . get ( k ))) else : metrics_formatted . append ( \"-\" ) baseline_formatted = [] for k in metric_names . keys (): if baseline . get ( k ): baseline_formatted . append ( metric_formatter [ k ] . format ( baseline . get ( k ))) else : baseline_formatted . append ( \"-\" ) return go . Table ( header = dict ( values = [ \"\u6307\u6807\u540d\" , \"\u7b56\u7565\" , baseline_name ]), cells = dict ( values = [ [ v for _ , v in metric_names . items ()], metrics_formatted , baseline_formatted , ], font_size = 10 , ), ) async def _trade_info_trace ( self ): \"\"\"\u6784\u5efahover text \u5e8f\u5217\"\"\" # convert trades into hover_info buys = defaultdict ( list ) sells = defaultdict ( list ) for _ , trade in self . trades . items (): trade_date = arrow . get ( trade [ \"time\" ]) . date () ipos = self . _frame2pos . get ( trade_date ) if ipos is None : logger . warning ( \"date %s in trade record not in backtest range\" , trade_date ) continue name = await Security . alias ( trade [ \"security\" ]) price = trade [ \"price\" ] side = trade [ \"order_side\" ] filled = trade [ \"filled\" ] trade_text = f \" { side } : { name } { filled / 100 : .0f } \u624b \u4ef7\u683c: { price : .02f } \u6210\u4ea4\u989d: { filled * price / 10000 : .1f } \u4e07\" if side == \"\u5356\u51fa\" : sells [ trade_date ] . append ( trade_text ) elif side in ( \"\u4e70\u5165\" , \"\u5206\u7ea2\u914d\u80a1\" ): buys [ trade_date ] . append ( trade_text ) X_buy , Y_buy , data_buy = [], [], [] X_sell , Y_sell , data_sell = [], [], [] for dt , text in buys . items (): ipos = self . _frame2pos . get ( dt ) Y_buy . append ( self . nv [ ipos ]) X_buy . append ( self . _format_tick ( dt )) asset = self . assets [ ipos ] hover = f \"\u8d44\u4ea7: { asset / 10000 : .1f } \u4e07
{ '
' . join ( text ) } \" data_buy . append ( hover ) trace_buy = go . Scatter ( x = X_buy , y = Y_buy , mode = \"markers\" , text = data_buy , name = \"\u4e70\u5165\u6210\u4ea4\" , marker = dict ( color = \"red\" , symbol = \"triangle-up\" ), hovertemplate = \"
% {text} \" , ) for dt , text in sells . items (): ipos = self . _frame2pos . get ( dt ) Y_sell . append ( self . nv [ ipos ]) X_sell . append ( self . _format_tick ( dt )) asset = self . assets [ ipos ] hover = f \"\u8d44\u4ea7: { asset / 10000 : .1f } \u4e07
{ '
' . join ( text ) } \" data_sell . append ( hover ) trace_sell = go . Scatter ( x = X_sell , y = Y_sell , mode = \"markers\" , text = data_sell , name = \"\u5356\u51fa\u6210\u4ea4\" , marker = dict ( color = \"green\" , symbol = \"triangle-down\" ), hovertemplate = \"
% {text} \" , ) return trace_buy , trace_sell async def plot ( self ): \"\"\"\u7ed8\u5236\u8d44\u4ea7\u66f2\u7ebf\u53ca\u56de\u6d4b\u6307\u6807\u56fe\"\"\" n = len ( self . assets ) bars = await Stock . get_bars ( self . baseline_code , n , FrameType . DAY , self . end ) baseline_prices = self . _fill_missing_prices ( bars , self . frames ) baseline_prices /= baseline_prices [ 0 ] fig = make_subplots ( rows = 1 , cols = 2 , shared_xaxes = False , specs = [ [{ \"secondary_y\" : True }, { \"type\" : \"table\" }], ], column_width = [ 0.75 , 0.25 ], horizontal_spacing = 0.01 , subplot_titles = ( \"\u8d44\u4ea7\u66f2\u7ebf\" , \"\u7b56\u7565\u6307\u6807\" ), ) fig . add_trace ( await self . _metrics_trace (), row = 1 , col = 2 ) if self . indicator is not None : indicator_on_hover = self . indicator [ \"value\" ] else : indicator_on_hover = None baseline_name = ( await Security . alias ( self . baseline_code ) if self . baseline_code else \"\u57fa\u51c6\" ) baseline_trace = go . Scatter ( y = baseline_prices , x = self . ticks , mode = \"lines\" , name = baseline_name , showlegend = True , text = indicator_on_hover , hovertemplate = \"
\u51c0\u503c:% {y:.2f} \" + \"
\u6307\u6807:% {text:.1f} \" , ) fig . add_trace ( baseline_trace , row = 1 , col = 1 ) nv_trace = go . Scatter ( y = self . nv , x = self . ticks , mode = \"lines\" , name = \"\u7b56\u7565\" , showlegend = True , hovertemplate = \"
\u51c0\u503c:% {y:.2f} \" , ) fig . add_trace ( nv_trace , row = 1 , col = 1 ) if self . indicator is not None : ind_trace = go . Scatter ( y = self . indicator [ \"value\" ], x = self . ticks , mode = \"lines\" , name = \"indicator\" , showlegend = True , visible = \"legendonly\" , ) fig . add_trace ( ind_trace , row = 1 , col = 1 , secondary_y = True ) for trace in await self . _trade_info_trace (): fig . add_trace ( trace , row = 1 , col = 1 ) fig . update_xaxes ( type = \"category\" , tickangle = 45 , nticks = len ( self . ticks ) // 5 ) fig . update_layout ( margin = dict ( l = 20 , r = 20 , t = 50 , b = 50 ), width = 1040 , height = 435 ) fig . update_layout ( hovermode = \"x unified\" , hoverlabel = dict ( bgcolor = \"rgba(255,255,255,0.8)\" ) ) fig . show ()","title":"MetricsGraph"},{"location":"api/plotting/metrics/#omicron.plotting.metrics.MetricsGraph.__init__","text":"Parameters: Name Type Description Default bills dict \u56de\u6d4b\u751f\u6210\u7684\u8d26\u5355\uff0c\u901a\u8fc7Strategy.bills\u83b7\u5f97 required metrics dict \u56de\u6d4b\u751f\u6210\u7684\u6307\u6807\uff0c\u901a\u8fc7strategy.metrics\u83b7\u5f97 required baseline_code str \u57fa\u51c6\u8bc1\u5238\u4ee3\u7801 '399300.XSHE' indicator Optional[pandas.core.frame.DataFrame] \u56de\u6d4b\u65f6\u4f7f\u7528\u7684\u6307\u6807\u3002\u5982\u679c\u5b58\u5728\uff0c\u5c06\u53e0\u52a0\u5230\u7b56\u7565\u56de\u6d4b\u56fe\u4e0a\u3002\u5b83\u5e94\u8be5\u662f\u4e00\u4e2a\u4ee5\u65e5\u671f\u4e3a\u7d22\u5f15\uff0c\u6307\u6807\u503c\u5217\u540d\u4e3a\"value\"\u7684pandas.DataFrame\u3002\u5982\u679c\u4e0d\u63d0\u4f9b\uff0c\u5c06\u4e0d\u4f1a\u7ed8\u5236\u6307\u6807\u56fe None Source code in omicron/plotting/metrics.py def __init__ ( self , bills : dict , metrics : dict , baseline_code : str = \"399300.XSHE\" , indicator : Optional [ pd . DataFrame ] = None , ): \"\"\" Args: bills: \u56de\u6d4b\u751f\u6210\u7684\u8d26\u5355\uff0c\u901a\u8fc7Strategy.bills\u83b7\u5f97 metrics: \u56de\u6d4b\u751f\u6210\u7684\u6307\u6807\uff0c\u901a\u8fc7strategy.metrics\u83b7\u5f97 baseline_code: \u57fa\u51c6\u8bc1\u5238\u4ee3\u7801 indicator: \u56de\u6d4b\u65f6\u4f7f\u7528\u7684\u6307\u6807\u3002\u5982\u679c\u5b58\u5728\uff0c\u5c06\u53e0\u52a0\u5230\u7b56\u7565\u56de\u6d4b\u56fe\u4e0a\u3002\u5b83\u5e94\u8be5\u662f\u4e00\u4e2a\u4ee5\u65e5\u671f\u4e3a\u7d22\u5f15\uff0c\u6307\u6807\u503c\u5217\u540d\u4e3a\"value\"\u7684pandas.DataFrame\u3002\u5982\u679c\u4e0d\u63d0\u4f9b\uff0c\u5c06\u4e0d\u4f1a\u7ed8\u5236\u6307\u6807\u56fe \"\"\" self . metrics = metrics self . trades = bills [ \"trades\" ] self . positions = bills [ \"positions\" ] self . start = arrow . get ( bills [ \"assets\" ][ 0 ][ 0 ]) . date () self . end = arrow . get ( bills [ \"assets\" ][ - 1 ][ 0 ]) . date () self . frames = [ tf . int2date ( f ) for f in tf . get_frames ( self . start , self . end , FrameType . DAY ) ] if indicator is not None : self . indicator = indicator . join ( pd . Series ( index = self . frames , name = \"frames\" ), how = \"right\" ) else : self . indicator = None # \u8bb0\u5f55\u65e5\u671f\u5230\u4e0b\u6807\u7684\u53cd\u5411\u6620\u5c04 self . _frame2pos = { f : i for i , f in enumerate ( self . frames )} self . ticks = self . _format_tick ( self . frames ) # TODO: there's bug in backtesting, temporarily fix here df = pd . DataFrame ( self . frames , columns = [ \"frame\" ]) df [ \"assets\" ] = np . nan assets = pd . DataFrame ( bills [ \"assets\" ], columns = [ \"frame\" , \"assets\" ]) df [ \"assets\" ] = assets [ \"assets\" ] self . assets = df . 
fillna ( method = \"ffill\" )[ \"assets\" ] . to_numpy () self . nv = self . assets / self . assets [ 0 ] self . baseline_code = baseline_code or \"399300.XSHE\"","title":"__init__()"},{"location":"api/plotting/metrics/#omicron.plotting.metrics.MetricsGraph.plot","text":"\u7ed8\u5236\u8d44\u4ea7\u66f2\u7ebf\u53ca\u56de\u6d4b\u6307\u6807\u56fe Source code in omicron/plotting/metrics.py async def plot ( self ): \"\"\"\u7ed8\u5236\u8d44\u4ea7\u66f2\u7ebf\u53ca\u56de\u6d4b\u6307\u6807\u56fe\"\"\" n = len ( self . assets ) bars = await Stock . get_bars ( self . baseline_code , n , FrameType . DAY , self . end ) baseline_prices = self . _fill_missing_prices ( bars , self . frames ) baseline_prices /= baseline_prices [ 0 ] fig = make_subplots ( rows = 1 , cols = 2 , shared_xaxes = False , specs = [ [{ \"secondary_y\" : True }, { \"type\" : \"table\" }], ], column_width = [ 0.75 , 0.25 ], horizontal_spacing = 0.01 , subplot_titles = ( \"\u8d44\u4ea7\u66f2\u7ebf\" , \"\u7b56\u7565\u6307\u6807\" ), ) fig . add_trace ( await self . _metrics_trace (), row = 1 , col = 2 ) if self . indicator is not None : indicator_on_hover = self . indicator [ \"value\" ] else : indicator_on_hover = None baseline_name = ( await Security . alias ( self . baseline_code ) if self . baseline_code else \"\u57fa\u51c6\" ) baseline_trace = go . Scatter ( y = baseline_prices , x = self . ticks , mode = \"lines\" , name = baseline_name , showlegend = True , text = indicator_on_hover , hovertemplate = \"
\u51c0\u503c:% {y:.2f} \" + \"
\u6307\u6807:% {text:.1f} \" , ) fig . add_trace ( baseline_trace , row = 1 , col = 1 ) nv_trace = go . Scatter ( y = self . nv , x = self . ticks , mode = \"lines\" , name = \"\u7b56\u7565\" , showlegend = True , hovertemplate = \"
\u51c0\u503c:% {y:.2f} \" , ) fig . add_trace ( nv_trace , row = 1 , col = 1 ) if self . indicator is not None : ind_trace = go . Scatter ( y = self . indicator [ \"value\" ], x = self . ticks , mode = \"lines\" , name = \"indicator\" , showlegend = True , visible = \"legendonly\" , ) fig . add_trace ( ind_trace , row = 1 , col = 1 , secondary_y = True ) for trace in await self . _trade_info_trace (): fig . add_trace ( trace , row = 1 , col = 1 ) fig . update_xaxes ( type = \"category\" , tickangle = 45 , nticks = len ( self . ticks ) // 5 ) fig . update_layout ( margin = dict ( l = 20 , r = 20 , t = 50 , b = 50 ), width = 1040 , height = 435 ) fig . update_layout ( hovermode = \"x unified\" , hoverlabel = dict ( bgcolor = \"rgba(255,255,255,0.8)\" ) ) fig . show ()","title":"plot()"}]} \ No newline at end of file +{"config":{"indexing":"full","lang":["en"],"min_search_length":3,"prebuild_index":false,"separator":"[\\s\\-]+"},"docs":[{"location":"","text":"Omicron - Core Library for Zillionare Contents \u00b6 \u7b80\u4ecb \u00b6 Omicron\u662fZillionare\u7684\u6838\u5fc3\u6a21\u5757\uff0c\u63d0\u4f9b\u4ee5\u4e0b\u529f\u80fd\uff1a \u884c\u60c5\u6570\u636e\u8bfb\u53d6\uff08\u9700\u8981\u542f\u52a8 zillionare-omega \u670d\u52a1\u3002 \u6982\u5ff5\u677f\u5757\u6570\u636e \uff0c\u4e5f\u9700\u8981\u542f\u52a8 zillionare-omega \u670d\u52a1\u3002 \u4ea4\u6613\u65e5\u5386\u53ca\u65f6\u95f4\u5e27\u76f8\u5173\u64cd\u4f5c \u8bc1\u5238\u5217\u8868\u53ca\u76f8\u5173\u67e5\u8be2\u64cd\u4f5c numpy\u6570\u7ec4\u529f\u80fd\u6269\u5c55 \u6280\u672f\u6307\u6807\u53ca\u5f62\u6001\u5206\u6790\u529f\u80fd \u5404\u79cd\u5747\u7ebf\u3001\u66f2\u7ebf\u62df\u5408\u3001\u76f4\u7ebf\u659c\u7387\u548c\u5939\u89e3\u8ba1\u7b97\u3001\u66f2\u7ebf\u5e73\u6ed1\u51fd\u6570\u7b49\u3002 \u5f62\u6001\u5206\u6790\u529f\u80fd\uff0c\u5982\u4ea4\u53c9\u3001\u9876\u5e95\u641c\u7d22\u3001\u5e73\u53f0\u68c0\u6d4b\u3001RSI\u80cc\u79bb\u7b49\u3002 \u7b56\u7565\u7f16\u5199\u6846\u67b6 \uff0c\u4e0d\u4fee\u6539\u4ee3\u7801\u5373\u53ef\u540c\u65f6\u7528\u4e8e\u5b9e\u76d8\u4e0e\u56de\u6d4b\u3002 \u7ed8\u56fe\u529f\u80fd\u3002\u63d0\u4f9b\u4e86 \u4ea4\u4e92\u5f0fk\u7ebf\u56fe \u53ca \u56de\u6d4b\u62a5\u544a \u3002 \u5176\u5b83 \u4fee\u6b63Python\u7684round\u51fd\u6570\u9519\u8bef\uff0c\u6539\u7528 math_round \u5224\u65ad\u4ef7\u683c\u662f\u5426\u76f8\u7b49\u7684\u51fd\u6570\uff1a price_equal Omicron\u662f\u5927\u5bcc\u7fc1\u91cf\u5316\u6846\u67b6\u7684\u4e00\u90e8\u5206\u3002\u60a8\u5fc5\u987b\u81f3\u5c11\u5b89\u88c5\u5e76\u8fd0\u884c Omega \uff0c\u7136\u540e\u624d\u80fd\u5229\u7528omicron\u6765\u8bbf\u95ee\u4e0a\u8ff0\u6570\u636e\u3002 \u4f7f\u7528\u6587\u6863 Credits \u00b6 Zillionare-Omicron\u91c7\u7528 Python Project Wizard \u6784\u5efa\u3002","title":"\u7b80\u4ecb"},{"location":"#contents","text":"","title":"Contents"},{"location":"#\u7b80\u4ecb","text":"Omicron\u662fZillionare\u7684\u6838\u5fc3\u6a21\u5757\uff0c\u63d0\u4f9b\u4ee5\u4e0b\u529f\u80fd\uff1a \u884c\u60c5\u6570\u636e\u8bfb\u53d6\uff08\u9700\u8981\u542f\u52a8 zillionare-omega \u670d\u52a1\u3002 \u6982\u5ff5\u677f\u5757\u6570\u636e \uff0c\u4e5f\u9700\u8981\u542f\u52a8 zillionare-omega \u670d\u52a1\u3002 \u4ea4\u6613\u65e5\u5386\u53ca\u65f6\u95f4\u5e27\u76f8\u5173\u64cd\u4f5c \u8bc1\u5238\u5217\u8868\u53ca\u76f8\u5173\u67e5\u8be2\u64cd\u4f5c numpy\u6570\u7ec4\u529f\u80fd\u6269\u5c55 \u6280\u672f\u6307\u6807\u53ca\u5f62\u6001\u5206\u6790\u529f\u80fd 
\u5404\u79cd\u5747\u7ebf\u3001\u66f2\u7ebf\u62df\u5408\u3001\u76f4\u7ebf\u659c\u7387\u548c\u5939\u89e3\u8ba1\u7b97\u3001\u66f2\u7ebf\u5e73\u6ed1\u51fd\u6570\u7b49\u3002 \u5f62\u6001\u5206\u6790\u529f\u80fd\uff0c\u5982\u4ea4\u53c9\u3001\u9876\u5e95\u641c\u7d22\u3001\u5e73\u53f0\u68c0\u6d4b\u3001RSI\u80cc\u79bb\u7b49\u3002 \u7b56\u7565\u7f16\u5199\u6846\u67b6 \uff0c\u4e0d\u4fee\u6539\u4ee3\u7801\u5373\u53ef\u540c\u65f6\u7528\u4e8e\u5b9e\u76d8\u4e0e\u56de\u6d4b\u3002 \u7ed8\u56fe\u529f\u80fd\u3002\u63d0\u4f9b\u4e86 \u4ea4\u4e92\u5f0fk\u7ebf\u56fe \u53ca \u56de\u6d4b\u62a5\u544a \u3002 \u5176\u5b83 \u4fee\u6b63Python\u7684round\u51fd\u6570\u9519\u8bef\uff0c\u6539\u7528 math_round \u5224\u65ad\u4ef7\u683c\u662f\u5426\u76f8\u7b49\u7684\u51fd\u6570\uff1a price_equal Omicron\u662f\u5927\u5bcc\u7fc1\u91cf\u5316\u6846\u67b6\u7684\u4e00\u90e8\u5206\u3002\u60a8\u5fc5\u987b\u81f3\u5c11\u5b89\u88c5\u5e76\u8fd0\u884c Omega \uff0c\u7136\u540e\u624d\u80fd\u5229\u7528omicron\u6765\u8bbf\u95ee\u4e0a\u8ff0\u6570\u636e\u3002 \u4f7f\u7528\u6587\u6863","title":"\u7b80\u4ecb"},{"location":"#credits","text":"Zillionare-Omicron\u91c7\u7528 Python Project Wizard \u6784\u5efa\u3002","title":"Credits"},{"location":"developer/","text":"Omicron\u7684\u5f00\u53d1\u6d41\u7a0b \u00b6 Omicron\u9075\u5faa ppw \u5b9a\u4e49\u7684\u5f00\u53d1\u6d41\u7a0b\u548c\u4ee3\u7801\u89c4\u8303\u3002\u60a8\u53ef\u4ee5\u9605\u8bfb tutorial \u6765\u4e86\u89e3\u66f4\u591a\u3002 \u7b80\u5355\u6765\u8bf4\uff0c\u901a\u8fc7ppw\u6784\u5efa\u7684\u5de5\u7a0b\uff0c\u5177\u6709\u4ee5\u4e0b\u80fd\u529b\uff1a \u57fa\u4e8epoetry\u8fdb\u884c\u4f9d\u8d56\u7ba1\u7406 \u00b6 \u901a\u8fc7poetry add\u7ed9\u9879\u76ee\u589e\u52a0\u65b0\u7684\u4f9d\u8d56\u3002\u5982\u679c\u4f9d\u8d56\u9879\u4ec5\u5728\u5f00\u53d1\u73af\u5883\u4e0b\u4f7f\u7528\uff0c\u8bf7\u589e\u52a0\u4e3aExtra\u9879\uff0c\u5e76\u6b63\u786e\u5f52\u7c7b\u4e3adev, doc\u548ctest\u4e2d\u7684\u4e00\u7c7b\u3002 \u4f7f\u7528poetry lock\u6765\u9501\u5b9a\u4f9d\u8d56\u7684\u7248\u672c\u3002 \u4f7f\u7528poetry update\u66f4\u65b0\u4f9d\u8d56\u9879\u3002 flake8, isort, black \u00b6 omicron\u4f7f\u7528flake8, isort\u548cblack\u8fdb\u884c\u8bed\u6cd5\u68c0\u67e5\u548c\u4ee3\u7801\u683c\u5f0f\u5316 pre-commit \u00b6 \u4f7f\u7528pre-commit\u6765\u786e\u4fdd\u63d0\u4ea4\u7684\u4ee3\u7801\u90fd\u7b26\u5408\u89c4\u8303\u3002\u5982\u679c\u662f\u521a\u4e0b\u8f7d\u4ee3\u7801\uff0c\u8bf7\u8fd0\u884cpre-commit install\u5b89\u88c5\u94a9\u5b50\u3002 TODO: \u5c06\u901a\u7528\u90e8\u5206\u8f6c\u6362\u5230\u5927\u5bcc\u7fc1\u7684\u5f00\u53d1\u8005\u6307\u5357\u4e2d \u00b6 \u5982\u4f55\u8fdb\u884c\u5355\u5143\u6d4b\u8bd5\uff1f \u00b6 \u8bbe\u7f6e\u73af\u5883\u53d8\u91cf \u00b6 Omicron\u5728notify\u5305\u4e2d\u63d0\u4f9b\u4e86\u53d1\u9001\u90ae\u4ef6\u548c\u9489\u9489\u6d88\u606f\u7684\u529f\u80fd\u3002\u5728\u8fdb\u884c\u5355\u5143\u6d4b\u8bd5\u524d\uff0c\u9700\u8981\u8bbe\u7f6e\u76f8\u5173\u7684\u73af\u5883\u53d8\u91cf\uff1a 1 2 3 4 5 6 7 DINGTALK_ACCESS_TOKEN = ? DINGTALK_SECRET = ? export MAIL_FROM = ? export MAIL_SERVER = ? export MAIL_TO = ? export MAIL_PASSWORD = ? 
\u4e0a\u8ff0\u73af\u5883\u53d8\u91cf\u5df2\u5728gh://zillionare/omicron\u4e2d\u8bbe\u7f6e\u3002\u5982\u679c\u60a8fork\u4e86omicron\u5e76\u4e14\u60f3\u901a\u8fc7github actions\u8fdb\u884c\u6d4b\u8bd5\uff0c\u8bf7\u5728\u60a8\u7684repo\u4e2d\u8bbe\u7f6e\u76f8\u5e94\u7684secrets\u3002 \u542f\u52a8\u6d4b\u8bd5 \u00b6 \u901a\u8fc7tox\u6765\u8fd0\u884c\u6d4b\u8bd5\u3002tox\u5c06\u542f\u52a8\u5fc5\u8981\u7684\u6d4b\u8bd5\u73af\u5883\uff08\u901a\u8fc7 stop_service.sh \u548c start_service.sh \uff09\u3002 \u6587\u6863 \u00b6 \u6587\u6863\u7531\u4e24\u90e8\u5206\u7ec4\u6210\u3002\u4e00\u90e8\u5206\u662f\u9879\u76ee\u6587\u6863\uff0c\u5b58\u653e\u5728docs\u76ee\u5f55\u4e0b\u3002\u53e6\u4e00\u90e8\u5206\u662fAPI\u6587\u6863\uff0c\u5b83\u4eec\u4ece\u6e90\u4ee3\u7801\u7684\u6ce8\u91ca\u4e2d\u63d0\u53d6\u3002\u751f\u6210\u6587\u6863\u7684\u5de5\u5177\u662fmkdocs\u3002API\u6587\u6863\u7684\u63d0\u53d6\u5219\u7531mkdocs\u7684\u63d2\u4ef6mkdocstrings\u63d0\u53d6\u3002","title":"\u5f00\u53d1\u8005\u6587\u6863"},{"location":"developer/#omicron\u7684\u5f00\u53d1\u6d41\u7a0b","text":"Omicron\u9075\u5faa ppw \u5b9a\u4e49\u7684\u5f00\u53d1\u6d41\u7a0b\u548c\u4ee3\u7801\u89c4\u8303\u3002\u60a8\u53ef\u4ee5\u9605\u8bfb tutorial \u6765\u4e86\u89e3\u66f4\u591a\u3002 \u7b80\u5355\u6765\u8bf4\uff0c\u901a\u8fc7ppw\u6784\u5efa\u7684\u5de5\u7a0b\uff0c\u5177\u6709\u4ee5\u4e0b\u80fd\u529b\uff1a","title":"Omicron\u7684\u5f00\u53d1\u6d41\u7a0b"},{"location":"developer/#\u57fa\u4e8epoetry\u8fdb\u884c\u4f9d\u8d56\u7ba1\u7406","text":"\u901a\u8fc7poetry add\u7ed9\u9879\u76ee\u589e\u52a0\u65b0\u7684\u4f9d\u8d56\u3002\u5982\u679c\u4f9d\u8d56\u9879\u4ec5\u5728\u5f00\u53d1\u73af\u5883\u4e0b\u4f7f\u7528\uff0c\u8bf7\u589e\u52a0\u4e3aExtra\u9879\uff0c\u5e76\u6b63\u786e\u5f52\u7c7b\u4e3adev, doc\u548ctest\u4e2d\u7684\u4e00\u7c7b\u3002 \u4f7f\u7528poetry lock\u6765\u9501\u5b9a\u4f9d\u8d56\u7684\u7248\u672c\u3002 \u4f7f\u7528poetry update\u66f4\u65b0\u4f9d\u8d56\u9879\u3002","title":"\u57fa\u4e8epoetry\u8fdb\u884c\u4f9d\u8d56\u7ba1\u7406"},{"location":"developer/#flake8-isort-black","text":"omicron\u4f7f\u7528flake8, isort\u548cblack\u8fdb\u884c\u8bed\u6cd5\u68c0\u67e5\u548c\u4ee3\u7801\u683c\u5f0f\u5316","title":"flake8, isort, black"},{"location":"developer/#pre-commit","text":"\u4f7f\u7528pre-commit\u6765\u786e\u4fdd\u63d0\u4ea4\u7684\u4ee3\u7801\u90fd\u7b26\u5408\u89c4\u8303\u3002\u5982\u679c\u662f\u521a\u4e0b\u8f7d\u4ee3\u7801\uff0c\u8bf7\u8fd0\u884cpre-commit install\u5b89\u88c5\u94a9\u5b50\u3002","title":"pre-commit"},{"location":"developer/#todo-\u5c06\u901a\u7528\u90e8\u5206\u8f6c\u6362\u5230\u5927\u5bcc\u7fc1\u7684\u5f00\u53d1\u8005\u6307\u5357\u4e2d","text":"","title":"TODO: \u5c06\u901a\u7528\u90e8\u5206\u8f6c\u6362\u5230\u5927\u5bcc\u7fc1\u7684\u5f00\u53d1\u8005\u6307\u5357\u4e2d"},{"location":"developer/#\u5982\u4f55\u8fdb\u884c\u5355\u5143\u6d4b\u8bd5","text":"","title":"\u5982\u4f55\u8fdb\u884c\u5355\u5143\u6d4b\u8bd5\uff1f"},{"location":"developer/#\u8bbe\u7f6e\u73af\u5883\u53d8\u91cf","text":"Omicron\u5728notify\u5305\u4e2d\u63d0\u4f9b\u4e86\u53d1\u9001\u90ae\u4ef6\u548c\u9489\u9489\u6d88\u606f\u7684\u529f\u80fd\u3002\u5728\u8fdb\u884c\u5355\u5143\u6d4b\u8bd5\u524d\uff0c\u9700\u8981\u8bbe\u7f6e\u76f8\u5173\u7684\u73af\u5883\u53d8\u91cf\uff1a 1 2 3 4 5 6 7 DINGTALK_ACCESS_TOKEN = ? DINGTALK_SECRET = ? export MAIL_FROM = ? export MAIL_SERVER = ? export MAIL_TO = ? export MAIL_PASSWORD = ? 
\u4e0a\u8ff0\u73af\u5883\u53d8\u91cf\u5df2\u5728gh://zillionare/omicron\u4e2d\u8bbe\u7f6e\u3002\u5982\u679c\u60a8fork\u4e86omicron\u5e76\u4e14\u60f3\u901a\u8fc7github actions\u8fdb\u884c\u6d4b\u8bd5\uff0c\u8bf7\u5728\u60a8\u7684repo\u4e2d\u8bbe\u7f6e\u76f8\u5e94\u7684secrets\u3002","title":"\u8bbe\u7f6e\u73af\u5883\u53d8\u91cf"},{"location":"developer/#\u542f\u52a8\u6d4b\u8bd5","text":"\u901a\u8fc7tox\u6765\u8fd0\u884c\u6d4b\u8bd5\u3002tox\u5c06\u542f\u52a8\u5fc5\u8981\u7684\u6d4b\u8bd5\u73af\u5883\uff08\u901a\u8fc7 stop_service.sh \u548c start_service.sh \uff09\u3002","title":"\u542f\u52a8\u6d4b\u8bd5"},{"location":"developer/#\u6587\u6863","text":"\u6587\u6863\u7531\u4e24\u90e8\u5206\u7ec4\u6210\u3002\u4e00\u90e8\u5206\u662f\u9879\u76ee\u6587\u6863\uff0c\u5b58\u653e\u5728docs\u76ee\u5f55\u4e0b\u3002\u53e6\u4e00\u90e8\u5206\u662fAPI\u6587\u6863\uff0c\u5b83\u4eec\u4ece\u6e90\u4ee3\u7801\u7684\u6ce8\u91ca\u4e2d\u63d0\u53d6\u3002\u751f\u6210\u6587\u6863\u7684\u5de5\u5177\u662fmkdocs\u3002API\u6587\u6863\u7684\u63d0\u53d6\u5219\u7531mkdocs\u7684\u63d2\u4ef6mkdocstrings\u63d0\u53d6\u3002","title":"\u6587\u6863"},{"location":"history/","text":"History \u00b6 2.0.0-alpha76 \u00b6 \u589e\u52a0backtestlog\u6a21\u5757\uff0c\u7528\u4e8e\u8f93\u51fa\u56de\u6d4b\u65e5\u5fd7\u65f6\uff0c\u5c06\u65f6\u95f4\u66ff\u6362\u4e3a\u56de\u6d4b\u65f6\u95f4 \u589e\u52a0\u884c\u60c5\u9884\u53d6\u529f\u80fd \u589e\u52a0\u56de\u6d4b\u62a5\u544a\u4e2d\u7ed8\u5236\u81ea\u5b9a\u4e49\u6307\u6807\u529f\u80fd\uff08\u4ec5\u652f\u6301Scatter) 2.0.0-alpha.69 \u00b6 BaseStrategy\u589e\u52a0 available_shares \u65b9\u6cd5 2.0.0-alpha.68 \u00b6 \u589e\u52a0\u4e86MetricsGraph \u589e\u52a0Strategy\u57fa\u7c7b Candlestick\u589e\u52a0\u4e86\u5e03\u6797\u5e26\u6307\u6807 2.0.0-alpha.49 (2022-09-16) \u00b6 \u4fee\u8ba2\u4e86\u5b89\u88c5\u6587\u6863\u3002 \u79fb\u9664\u4e86windows\u4e0b\u5bf9ta-lib\u7684\u4f9d\u8d56\u3002\u8bf7\u53c2\u8003 \u5b89\u88c5\u6307\u5357 \u4ee5\u83b7\u53d6\u5728windows\u4e0b\u5b89\u88c5ta-lib\u7684\u65b9\u6cd5\u3002 \u66f4\u65b0\u4e86poetry.lock\u6587\u4ef6\u3002\u5728\u4e0a\u4e00\u7248\u4e2d\uff0c\u8be5\u6587\u4ef6\u4e0epyproject.toml\u4e0d\u540c\u6b65\uff0c\u5bfc\u81f4\u5b89\u88c5\u65f6\u8fdb\u884c\u7248\u672c\u9501\u5b9a\uff0c\u5ef6\u957f\u4e86\u5b89\u88c5\u65f6\u95f4\u3002 \u4fee\u590d\u4e86k\u7ebf\u56fe\u6807\u8bb0\u9876\u548c\u5e95\u65f6\uff0c\u6807\u8bb0\u79bb\u88ab\u6807\u6ce8\u7684\u70b9\u592a\u8fdc\u7684\u95ee\u9898\u3002 2.0.0-alpha.46 (2022-09-10) \u00b6 #40 \u589e\u52a0k\u7ebf\u56fe\u7ed8\u5236\u529f\u80fd\u3002 \u672c\u6b21\u4fee\u8ba2\u589e\u52a0\u4e86\u5bf9plotly, ckwrap, ta-lib\u7684\u4f9d\u8d56\u3002 \u5c06\u539f\u5c5e\u4e8eomicron.talib\u5305\u4e2d\u7684bars_since, find_runs\u7b49\u8ddf\u6570\u7ec4\u76f8\u5173\u7684\u64cd\u4f5c\uff0c\u79fb\u5165omicron.extensions.np\u4e2d\u3002 2.0.0-alpha.45 (2022-09-08) \u00b6 #39 fixed. removed dependency of postgres removed funds update arrow's version to be great than 1.2 lock aiohttp's version to >3.8, <4.0> 2.0.0-alpha.35 (2022-07-13) \u00b6 fix issue in security exit date comparison, Security.eval(). 2.0.0-alpha.34 (2022-07-13) \u00b6 change to sync call for Security.select() date parameter of Security.select(): if date >= today, it will use the data in cache, otherwise, query from database. 0.3.1 (2020-12-11) \u00b6 this version introduced no features, just a internal amendment release, we're migrating to poetry build system. 
0.3.0 (2020-11-22) \u00b6 Calendar, Triggers and time frame calculation Security list Bars with turnover Valuation 0.1.0 (2020-04-28) \u00b6 First release on PyPI.","title":"\u7248\u672c\u5386\u53f2"},{"location":"history/#history","text":"","title":"History"},{"location":"history/#200-alpha76","text":"\u589e\u52a0backtestlog\u6a21\u5757\uff0c\u7528\u4e8e\u8f93\u51fa\u56de\u6d4b\u65e5\u5fd7\u65f6\uff0c\u5c06\u65f6\u95f4\u66ff\u6362\u4e3a\u56de\u6d4b\u65f6\u95f4 \u589e\u52a0\u884c\u60c5\u9884\u53d6\u529f\u80fd \u589e\u52a0\u56de\u6d4b\u62a5\u544a\u4e2d\u7ed8\u5236\u81ea\u5b9a\u4e49\u6307\u6807\u529f\u80fd\uff08\u4ec5\u652f\u6301Scatter)","title":"2.0.0-alpha76"},{"location":"history/#200-alpha69","text":"BaseStrategy\u589e\u52a0 available_shares \u65b9\u6cd5","title":"2.0.0-alpha.69"},{"location":"history/#200-alpha68","text":"\u589e\u52a0\u4e86MetricsGraph \u589e\u52a0Strategy\u57fa\u7c7b Candlestick\u589e\u52a0\u4e86\u5e03\u6797\u5e26\u6307\u6807","title":"2.0.0-alpha.68"},{"location":"history/#200-alpha49-2022-09-16","text":"\u4fee\u8ba2\u4e86\u5b89\u88c5\u6587\u6863\u3002 \u79fb\u9664\u4e86windows\u4e0b\u5bf9ta-lib\u7684\u4f9d\u8d56\u3002\u8bf7\u53c2\u8003 \u5b89\u88c5\u6307\u5357 \u4ee5\u83b7\u53d6\u5728windows\u4e0b\u5b89\u88c5ta-lib\u7684\u65b9\u6cd5\u3002 \u66f4\u65b0\u4e86poetry.lock\u6587\u4ef6\u3002\u5728\u4e0a\u4e00\u7248\u4e2d\uff0c\u8be5\u6587\u4ef6\u4e0epyproject.toml\u4e0d\u540c\u6b65\uff0c\u5bfc\u81f4\u5b89\u88c5\u65f6\u8fdb\u884c\u7248\u672c\u9501\u5b9a\uff0c\u5ef6\u957f\u4e86\u5b89\u88c5\u65f6\u95f4\u3002 \u4fee\u590d\u4e86k\u7ebf\u56fe\u6807\u8bb0\u9876\u548c\u5e95\u65f6\uff0c\u6807\u8bb0\u79bb\u88ab\u6807\u6ce8\u7684\u70b9\u592a\u8fdc\u7684\u95ee\u9898\u3002","title":"2.0.0-alpha.49 (2022-09-16)"},{"location":"history/#200-alpha46-2022-09-10","text":"#40 \u589e\u52a0k\u7ebf\u56fe\u7ed8\u5236\u529f\u80fd\u3002 \u672c\u6b21\u4fee\u8ba2\u589e\u52a0\u4e86\u5bf9plotly, ckwrap, ta-lib\u7684\u4f9d\u8d56\u3002 \u5c06\u539f\u5c5e\u4e8eomicron.talib\u5305\u4e2d\u7684bars_since, find_runs\u7b49\u8ddf\u6570\u7ec4\u76f8\u5173\u7684\u64cd\u4f5c\uff0c\u79fb\u5165omicron.extensions.np\u4e2d\u3002","title":"2.0.0-alpha.46 (2022-09-10)"},{"location":"history/#200-alpha45-2022-09-08","text":"#39 fixed. removed dependency of postgres removed funds update arrow's version to be great than 1.2 lock aiohttp's version to >3.8, <4.0>","title":"2.0.0-alpha.45 (2022-09-08)"},{"location":"history/#200-alpha35-2022-07-13","text":"fix issue in security exit date comparison, Security.eval().","title":"2.0.0-alpha.35 (2022-07-13)"},{"location":"history/#200-alpha34-2022-07-13","text":"change to sync call for Security.select() date parameter of Security.select(): if date >= today, it will use the data in cache, otherwise, query from database.","title":"2.0.0-alpha.34 (2022-07-13)"},{"location":"history/#031-2020-12-11","text":"this version introduced no features, just a internal amendment release, we're migrating to poetry build system.","title":"0.3.1 (2020-12-11)"},{"location":"history/#030-2020-11-22","text":"Calendar, Triggers and time frame calculation Security list Bars with turnover Valuation","title":"0.3.0 (2020-11-22)"},{"location":"history/#010-2020-04-28","text":"First release on PyPI.","title":"0.1.0 (2020-04-28)"},{"location":"installation/","text":"1. 
\u5b89\u88c5 \u00b6 \u8981\u4f7f\u7528Omicron\u6765\u83b7\u53d6\u884c\u60c5\u6570\u636e\uff0c\u8bf7\u5148\u5b89\u88c5 Omega \uff0c\u5e76\u6309\u8bf4\u660e\u6587\u6863\u8981\u6c42\u5b8c\u6210\u521d\u59cb\u5316\u914d\u7f6e\u3002 \u7136\u540e\u5728\u5f00\u53d1\u673a\u4e0a\uff0c\u8fd0\u884c\u4e0b\u9762\u7684\u547d\u4ee4\u5b89\u88c5Omicron: 1 pip install zillionare-omicron omicron\u4f9d\u8d56numpy, pandas, scipy, sklearn\u3002\u8fd9\u4e9b\u5e93\u7684\u4f53\u79ef\u6bd4\u8f83\u5927\uff0c\u56e0\u6b64\u5728\u5b89\u88c5omicron\u65f6\uff0c\u8bf7\u4fdd\u6301\u7f51\u7edc\u8fde\u63a5\u7545\u901a\uff0c\u5fc5\u8981\u65f6\uff0c\u8bf7\u6dfb\u52a0\u963f\u91cc\u6216\u8005\u6e05\u534e\u7684PyPI\u955c\u50cf\u3002 omicron\u8fd8\u4f9d\u8d56\u4e8etalib, zigzag, ciso8601\u7b49\u9ad8\u6027\u80fd\u7684C/C++\u5e93\u3002\u5b89\u88c5\u8fd9\u4e9b\u5e93\u5f80\u5f80\u9700\u8981\u5728\u60a8\u672c\u673a\u6267\u884c\u4e00\u4e2a\u7f16\u8bd1\u8fc7\u7a0b\u3002\u8bf7\u9075\u5faa\u4ee5\u4e0b\u6b65\u9aa4\u5b8c\u6210\uff1a \u5b89\u88c5\u539f\u751f\u5e93 Windows Linux MacOS \u6ce8\u610f\u6211\u4eec\u4e0d\u652f\u630132\u4f4dwindows \u8bf7\u8ddf\u968f windows\u4e0b\u5b89\u88c5omicron \u6765\u5b8c\u6210\u5b89\u88c5\u3002 \u8bf7\u6267\u884c\u4e0b\u9762\u7684\u811a\u672c\u4ee5\u5b8c\u6210ta-lib\u7684\u5b89\u88c5 1 2 3 4 5 6 7 sudo apt update && sudo apt upgrade -y && sudo apt autoremove -y sudo apt-get install build-essential -y curl -L http://prdownloads.sourceforge.net/ta-lib/ta-lib-0.4.0-src.tar.gz | tar -xzv -C /tmp/ cd /tmp/ta-lib ./configure --prefix = /usr make sudo make install \u73b0\u5728\u5b89\u88c5omicron\uff0c\u6240\u6709\u5176\u5b83\u4f9d\u8d56\u7684\u5b89\u88c5\u5c06\u81ea\u52a8\u5b8c\u6210\u3002 \u8bf7\u901a\u8fc7 brew install ta-lib \u6765\u5b8c\u6210ta-lib\u7684\u5b89\u88c5 \u73b0\u5728\u5b89\u88c5omicron\uff0c\u6240\u6709\u5176\u5b83\u4f9d\u8d56\u7684\u5b89\u88c5\u90fd\u5c06\u81ea\u52a8\u5b8c\u6210\u3002 2. 
\u5e38\u89c1\u95ee\u9898 \u00b6 \u65e0\u6cd5\u8bbf\u95eeaka.ms \u00b6 \u5982\u679c\u9047\u5230aka.ms\u65e0\u6cd5\u8bbf\u95ee\u7684\u95ee\u9898\uff0c\u6709\u53ef\u80fd\u662fIP\u5730\u5740\u89e3\u6790\u7684\u95ee\u9898\u3002\u8bf7\u4ee5\u7ba1\u7406\u5458\u6743\u9650\uff0c\u6253\u5f00\u5e76\u7f16\u8f91\u4f4d\u4e8ec:\\windows\\system32\\drivers\\etc\\\u4e0b\u7684hosts\u6587\u4ef6\uff0c\u5c06\u6b64\u884c\u52a0\u5165\u5230\u6587\u4ef6\u4e2d\uff1a 1 23.41.86.106 aka.ms","title":"\u5b89\u88c5"},{"location":"installation/#1-\u5b89\u88c5","text":"\u8981\u4f7f\u7528Omicron\u6765\u83b7\u53d6\u884c\u60c5\u6570\u636e\uff0c\u8bf7\u5148\u5b89\u88c5 Omega \uff0c\u5e76\u6309\u8bf4\u660e\u6587\u6863\u8981\u6c42\u5b8c\u6210\u521d\u59cb\u5316\u914d\u7f6e\u3002 \u7136\u540e\u5728\u5f00\u53d1\u673a\u4e0a\uff0c\u8fd0\u884c\u4e0b\u9762\u7684\u547d\u4ee4\u5b89\u88c5Omicron: 1 pip install zillionare-omicron omicron\u4f9d\u8d56numpy, pandas, scipy, sklearn\u3002\u8fd9\u4e9b\u5e93\u7684\u4f53\u79ef\u6bd4\u8f83\u5927\uff0c\u56e0\u6b64\u5728\u5b89\u88c5omicron\u65f6\uff0c\u8bf7\u4fdd\u6301\u7f51\u7edc\u8fde\u63a5\u7545\u901a\uff0c\u5fc5\u8981\u65f6\uff0c\u8bf7\u6dfb\u52a0\u963f\u91cc\u6216\u8005\u6e05\u534e\u7684PyPI\u955c\u50cf\u3002 omicron\u8fd8\u4f9d\u8d56\u4e8etalib, zigzag, ciso8601\u7b49\u9ad8\u6027\u80fd\u7684C/C++\u5e93\u3002\u5b89\u88c5\u8fd9\u4e9b\u5e93\u5f80\u5f80\u9700\u8981\u5728\u60a8\u672c\u673a\u6267\u884c\u4e00\u4e2a\u7f16\u8bd1\u8fc7\u7a0b\u3002\u8bf7\u9075\u5faa\u4ee5\u4e0b\u6b65\u9aa4\u5b8c\u6210\uff1a \u5b89\u88c5\u539f\u751f\u5e93 Windows Linux MacOS \u6ce8\u610f\u6211\u4eec\u4e0d\u652f\u630132\u4f4dwindows \u8bf7\u8ddf\u968f windows\u4e0b\u5b89\u88c5omicron \u6765\u5b8c\u6210\u5b89\u88c5\u3002 \u8bf7\u6267\u884c\u4e0b\u9762\u7684\u811a\u672c\u4ee5\u5b8c\u6210ta-lib\u7684\u5b89\u88c5 1 2 3 4 5 6 7 sudo apt update && sudo apt upgrade -y && sudo apt autoremove -y sudo apt-get install build-essential -y curl -L http://prdownloads.sourceforge.net/ta-lib/ta-lib-0.4.0-src.tar.gz | tar -xzv -C /tmp/ cd /tmp/ta-lib ./configure --prefix = /usr make sudo make install \u73b0\u5728\u5b89\u88c5omicron\uff0c\u6240\u6709\u5176\u5b83\u4f9d\u8d56\u7684\u5b89\u88c5\u5c06\u81ea\u52a8\u5b8c\u6210\u3002 \u8bf7\u901a\u8fc7 brew install ta-lib \u6765\u5b8c\u6210ta-lib\u7684\u5b89\u88c5 \u73b0\u5728\u5b89\u88c5omicron\uff0c\u6240\u6709\u5176\u5b83\u4f9d\u8d56\u7684\u5b89\u88c5\u90fd\u5c06\u81ea\u52a8\u5b8c\u6210\u3002","title":"1. \u5b89\u88c5"},{"location":"installation/#2-\u5e38\u89c1\u95ee\u9898","text":"","title":"2. \u5e38\u89c1\u95ee\u9898"},{"location":"installation/#\u65e0\u6cd5\u8bbf\u95eeakams","text":"\u5982\u679c\u9047\u5230aka.ms\u65e0\u6cd5\u8bbf\u95ee\u7684\u95ee\u9898\uff0c\u6709\u53ef\u80fd\u662fIP\u5730\u5740\u89e3\u6790\u7684\u95ee\u9898\u3002\u8bf7\u4ee5\u7ba1\u7406\u5458\u6743\u9650\uff0c\u6253\u5f00\u5e76\u7f16\u8f91\u4f4d\u4e8ec:\\windows\\system32\\drivers\\etc\\\u4e0b\u7684hosts\u6587\u4ef6\uff0c\u5c06\u6b64\u884c\u52a0\u5165\u5230\u6587\u4ef6\u4e2d\uff1a 1 23.41.86.106 aka.ms","title":"\u65e0\u6cd5\u8bbf\u95eeaka.ms"},{"location":"usage/","text":"1. 
\u914d\u7f6e\u3001\u521d\u59cb\u5316\u548c\u5173\u95ed OMICRON \u00b6 Omicron \u4f9d\u8d56\u4e8e zillionare-omega \u670d\u52a1\u6765\u83b7\u53d6\u6570\u636e\u3002\u4f46\u5b83\u5e76\u4e0d\u76f4\u63a5\u4e0e Omega \u670d\u52a1\u901a\u8baf\uff0c\u76f8\u53cd\uff0c\u5b83\u76f4\u63a5\u8bfb\u53d6 Omega \u670d\u52a1\u5668\u4f1a\u5199\u5165\u6570\u636e\u7684 Influxdb \u548c redis \u6570\u636e\u5e93\u3002\u56e0\u6b64\uff0c\u5728\u4f7f\u7528 Omicron \u4e4b\u524d\uff0c\u6211\u4eec\u9700\u8981\u63d0\u4f9b\u8fd9\u4e24\u4e2a\u670d\u52a1\u5668\u7684\u8fde\u63a5\u5730\u5740\uff0c\u5e76\u8fdb\u884c\u521d\u59cb\u5316\u3002 1.1. \u914d\u7f6e\u548c\u521d\u59cb\u5316 \u00b6 Omicron \u4f7f\u7528 cfg4py \u6765\u7ba1\u7406\u914d\u7f6e\u3002 cfg4py \u4f7f\u7528 yaml \u6587\u4ef6\u6765\u4fdd\u5b58\u914d\u7f6e\u9879\u3002\u5728\u4f7f\u7528 cfg4py \u4e4b\u524d\uff0c\u60a8\u9700\u8981\u5728\u67d0\u5904\u521d\u59cb\u5316 cfg4py\uff0c\u7136\u540e\u518d\u521d\u59cb\u5316 omicron: Tip \u4e3a\u4e86\u7b80\u6d01\u8d77\u89c1\uff0c\u6211\u4eec\u5728\u9876\u5c42\u4ee3\u7801\u4e2d\u76f4\u63a5\u4f7f\u7528\u4e86 async/await\u3002\u901a\u5e38\uff0c\u8fd9\u4e9b\u4ee3\u7801\u80fd\u591f\u76f4\u63a5\u5728 notebook \u4e2d\u8fd0\u884c\uff0c\u4f46\u5982\u679c\u9700\u8981\u5728\u666e\u901a\u7684 python \u811a\u672c\u4e2d\u8fd0\u884c\u8fd9\u4e9b\u4ee3\u7801\uff0c\u60a8\u901a\u5e38\u9700\u8981\u5c06\u5176\u5c01\u88c5\u5230\u4e00\u4e2a\u5f02\u6b65\u51fd\u6570\u4e2d\uff0c\u518d\u901a\u8fc7 asyncio.run \u6765\u8fd0\u884c\u5b83\u3002 1 2 3 4 5 6 7 8 9 import asyncio import cfg4py import omicron async def main (): cfg4py . init ( 'path/to/your/config/dir' ) await omicron . init () # DO YOUR GREAT JOB WITH OMICRON asyncio . run ( main ()) 1 2 3 4 5 import cfg4py import omicron cfg4py . init ( 'path/to/your/config/dir' ) await omicron . init () \u6ce8\u610f\u521d\u59cb\u5316 cfg4py \u65f6\uff0c\u9700\u8981\u63d0\u4f9b\u5305\u542b\u914d\u7f6e\u6587\u4ef6\u7684 \u6587\u4ef6\u5939 \u7684\u8def\u5f84\uff0c\u800c \u4e0d\u662f\u914d\u7f6e\u6587\u4ef6 \u7684\u8def\u5f84\u3002\u914d\u7f6e\u6587\u4ef6\u540d\u5fc5\u987b\u4e3a defaults.yml\u3002 \u60a8\u81f3\u5c11\u5e94\u8be5\u4e3a omicron \u914d\u7f6e Redis \u8fde\u63a5\u4e32\u548c influxdb \u8fde\u63a5\u4e32\u3002\u4e0b\u9762\u662f\u5e38\u7528\u914d\u7f6e\u793a\u4f8b\uff1a 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 # DEFAULTS.YAML redis : dsn : redis://${REDIS_HOST}:${REDIS_PORT} influxdb : url : http://${INFLUXDB_HOST}:${INFLUXDB_PORT} token : ${INFLUXDB_TOKEN} org : ${INFLUXDB_ORG} bucket_name : ${INFLUXDB_BUCKET_NAME} enable_compress : true max_query_size : 150000 notify : mail_from : ${MAIL_FROM} mail_to : - ${MAIL_TO} mail_server : ${MAIL_SERVER} dingtalk_access_token : ${DINGTALK_ACCESS_TOKEN} dingtalk_secret : ${DINGTALK_SECRET} \u8bf7\u6839\u636e\u60a8\u5b9e\u9645\u73af\u5883\u914d\u7f6e\u6765\u66f4\u6539\u4e0a\u8ff0\u6587\u4ef6\u3002\u4e0a\u8ff0\u914d\u7f6e\u4e2d\uff0c${{REDIS_HOST}}\u610f\u5473\u7740\u73af\u5883\u53d8\u91cf\u3002\u5982\u679c\u662f windows\uff0c\u60a8\u9700\u8981\u5728\u7cfb\u7edf > \u73af\u5883\u53d8\u91cf\u4e2d\u8fdb\u884c\u8bbe\u7f6e\u3002\u5982\u679c\u662f Linux \u6216\u8005 Mac\uff0c\u60a8\u9700\u8981\u4fee\u6539.bashrc\uff0c\u4f8b\u5982\uff1a 1 export REDIS_HOST=localhost 1.2. \u5173\u95ed omicron \u00b6 \u5728\u60a8\u7684\u8fdb\u7a0b\u5373\u5c06\u9000\u51fa\u4e4b\u524d\uff0c\u8bf7\u8bb0\u5f97\u5173\u95ed omicron\u3002\u5982\u679c\u60a8\u662f\u5728 notebook \u4e2d\u4f7f\u7528 omicron, \u5219\u53ef\u4ee5\u5ffd\u7565\u6b64\u6b65\u805a\u3002 1 await omicron . close () 2. 
\u6570\u636e\u8bfb\u53d6 \u00b6 2.1. \u8bc1\u5238\u5217\u8868 \u00b6 Security \u548c Query \u63d0\u4f9b\u4e86\u8bc1\u5238\u5217\u8868\u548c\u67e5\u8be2\u64cd\u4f5c\u3002\u67e5\u8be2\u88ab\u8bbe\u8ba1\u6210\u4e3a\u94fe\u5f0f API\u3002\u901a\u5e38\uff0c\u6211\u4eec\u901a\u8fc7\u8c03\u7528 Security.select() \u6765\u751f\u6210\u4e00\u4e2a Query \u5bf9\u8c61\uff0c\u7136\u540e\u53ef\u4ee5\u9488\u5bf9\u6b64\u5bf9\u8c61\uff0c\u8fdb\u884c\u5404\u79cd\u8fc7\u67e5\u8be2\u8fc7\u6ee4\uff0c\u6700\u540e\uff0c\u6211\u4eec\u8c03\u7528 query.eval() \u65b9\u6cd5\u7ed3\u675f\u94fe\u5f0f\u8c03\u7528\uff0c\u5e76\u8fd4\u56de\u7ed3\u679c\u3002 2.1.1. \u67e5\u8be2\u6240\u6709\u8bc1\u5238\u4ee3\u7801 \u00b6 \u60a8\u53ef\u4ee5\u901a\u8fc7\u4ee5\u4e0b\u65b9\u6cd5\u6765\u83b7\u53d6\u67d0\u4e00\u5929\u7684\u8bc1\u5238\u5217\u8868\uff1a 1 2 3 4 5 6 7 # 4. ASSUME YOU HAVE OMICRON INIT dt = datetime . date ( 2022 , 5 , 20 ) query = Security . select ( dt ) codes = await query . eval () print ( codes ) # THE OUTPUTS IS LIKE [\"000001.XSHE\", \"000004.XSHE\", ...] \u8fd9\u91cc\u7684 dt \u5982\u679c\u6ca1\u6709\u63d0\u4f9b\u7684\u8bdd\uff0c\u5c06\u4f7f\u7528\u6700\u65b0\u7684\u8bc1\u5238\u5217\u8868\u3002\u4f46\u5728\u56de\u6d4b\u4e2d\uff0c\u60a8\u901a\u5e38\u4e0d\u540c\u65f6\u95f4\u7684\u8bc1\u5238\u5217\u8868\uff0c\u56e0\u6b64\uff0c dt \u5728\u8fd9\u79cd\u60c5\u51b5\u4e0b\u662f\u5fc5\u987b\u7684\uff0c\u5426\u5219\uff0c\u60a8\u5c06\u5f15\u5165\u672a\u6765\u6570\u636e\u3002 2.1.2. \u8fd4\u56de\u6240\u6709\u80a1\u7968\u6216\u8005\u6307\u6570 \u00b6 1 2 3 query = Security . select ( dt ) codes = await query . types ([ \"stock\" ]) . eval () print ( codes ) 2.1.3. \u6392\u9664\u67d0\u79cd\u80a1\u7968\uff08\u8bc1\u5238\uff09 \u00b6 1 2 3 query = Security . select ( dt ) codes = await query . exclude_st () . exclude_kcb () . exclude_cyb () . eval () print ( codes ) 2.1.4. \u5982\u679c\u53ea\u8981\u6c42\u67d0\u79cd\u80a1\u7968\uff08\u8bc1\u5238\uff09 \u00b6 1 2 3 4 query = Security . select ( dt ) codes = await query . only_kcb () . only_st () . only_cyb () . eval () print ( codes ) #\u5f97\u5230\u7a7a\u5217\u8868 2.1.5. \u6309\u522b\u540d\u8fdb\u884c\u6a21\u7cca\u67e5\u8be2 \u00b6 A \u80a1\u7684\u8bc1\u5238\u5728\u6807\u8bc6\u4e0a\uff0c\u4e00\u822c\u6709\u4ee3\u7801\uff08code \u6216\u8005 symbol)\u3001\u62fc\u97f3\u7b80\u5199 (name) \u548c\u6c49\u5b57\u8868\u793a\u540d (display_name) \u4e09\u79cd\u6807\u8bc6\u3002\u6bd4\u5982\u4e2d\u56fd\u5e73\u5b89\uff0c\u5176\u4ee3\u7801\u4e3a 601318.XSHG; \u5176\u62fc\u97f3\u7b80\u5199\u4e3a ZGPA\uff1b\u800c\u4e2d\u56fd\u5e73\u5b89\u88ab\u79f0\u4e3a\u5b83\u7684\u522b\u540d ( alias )\u3002 \u5982\u679c\u8981\u67e5\u8be2\u6240\u6709\u4e2d\u5b57\u5934\u7684\u80a1\u7968\uff1a 1 2 3 query = Security . select ( dt ) codes = await query . alias_like ( \"\u4e2d\" ) . eval () print ( codes ) 2.1.6. \u901a\u8fc7\u4ee3\u7801\u67e5\u8be2\u5176\u5b83\u4fe1\u606f \u00b6 \u901a\u8fc7\u524d\u9762\u7684\u67e5\u8be2\u6211\u4eec\u53ef\u4ee5\u5f97\u5230\u4e00\u4e2a\u8bc1\u5238\u5217\u8868\uff0c\u5982\u679c\u8981\u5f97\u5230\u5177\u4f53\u7684\u4fe1\u606f\uff0c\u53ef\u4ee5\u901a\u8fc7 info \u63a5\u53e3\u6765\u67e5\u8be2\uff1a 1 2 3 dt = datetime . date ( 2022 , 5 , 20 ) info = await Security . info ( \"688001.XSHG\" , dt ) print ( info ) \u8f93\u51fa\u4e3a\uff1a 1 2 3 4 5 6 7 8 { ' t ype' : 's t ock' , 'display_ na me' : '\u534e\u5174\u6e90\u521b' , 'alias' : '\u534e\u5174\u6e90\u521b' , 'e n d' : da tet ime.da te ( 2200 , 1 , 1 ) , 's tart ' : da tet ime.da te ( 2019 , 7 , 22 ) , ' na me' : 'HXYC' } 2.2. 
\u4ea4\u6613\u65e5\u5386\u53ca\u65f6\u95f4\u5e27\u8ba1\u7b97 \u00b6 Omicron \u4e0d\u4ec5\u63d0\u4f9b\u4e86\u4ea4\u6613\u65e5\u5386\uff0c\u4e0e\u5176\u5b83\u91cf\u5316\u6846\u67b6\u76f8\u6bd4\uff0c\u6211\u4eec\u8fd8\u63d0\u4f9b\u4e86\u4e30\u5bcc\u7684\u4e0e\u65f6\u95f4\u76f8\u5173\u7684\u8fd0\u7b97\u64cd\u4f5c\u3002\u8fd9\u4e9b\u64cd\u4f5c\u90fd\u6709\u8be6\u7ec6\u7684\u6587\u6863\u548c\u793a\u4f8b\uff0c\u60a8\u53ef\u4ee5\u901a\u8fc7 TimeFrame \u6765\u8fdb\u4e00\u6b65\u9605\u8bfb\u3002 omicron \u4e2d\uff0c\u5e38\u5e38\u4f1a\u9047\u5230\u65f6\u95f4\u5e27 (Time Frame) \u8fd9\u4e2a\u6982\u5ff5\u3002\u56e0\u4e3a\u884c\u60c5\u6570\u636e\u90fd\u662f\u6309\u4e00\u5b9a\u7684\u65f6\u95f4\u957f\u5ea6\u7ec4\u7ec7\u7684\uff0c\u6bd4\u5982 5 \u5206\u949f\uff0c1 \u5929\uff0c\u7b49\u7b49\u3002\u56e0\u6b64\uff0c\u5728 omicron \u4e2d\uff0c\u6211\u4eec\u7ecf\u5e38\u4f7f\u7528\u67d0\u4e2a\u65f6\u95f4\u7247\u7ed3\u675f\u7684\u65f6\u95f4\uff0c\u6765\u6807\u8bc6\u8fd9\u4e2a\u65f6\u95f4\u7247\uff0c\u5e76\u5c06\u5176\u79f0\u4e4b\u4e3a\u5e27 (Time Frame)\u3002 omicron \u4e2d\uff0c\u6211\u4eec\u652f\u6301\u7684\u65f6\u95f4\u5e27\u5305\u62ec\u65e5\u5185\u7684\u5206\u949f\u5e27 (FrameType.MIN1), 5 \u5206\u949f\u5e27 (FrameType.MIN5), 15 \u5206\u949f\u5e27\u300130 \u5206\u949f\u5e27\u548c 60 \u5206\u949f\u5e27\uff0c\u4ee5\u53ca\u65e5\u7ebf\u7ea7\u522b\u7684 FrameType.DAY, FrameType.WEEK \u7b49\u3002\u5173\u4e8e\u8be6\u7ec6\u7684\u7c7b\u578b\u8bf4\u660e\uff0c\u8bf7\u53c2\u89c1 coretypes omicron \u63d0\u4f9b\u7684\u4ea4\u6613\u65e5\u5386\u8d77\u59cb\u4e8e 2005 \u5e74 1 \u6708 4 \u65e5\u3002\u63d0\u4f9b\u7684\u884c\u60c5\u6570\u636e\uff0c\u6700\u65e9\u4ece\u8fd9\u4e00\u5929\u8d77\u3002 \u5927\u81f4\u4e0a\uff0comicron \u63d0\u4f9b\u4e86\u4ee5\u4e0b\u65f6\u95f4\u5e27\u64cd\u4f5c\uff1a 2.2.1. \u4ea4\u6613\u65f6\u95f4\u7684\u504f\u79fb \u00b6 \u5982\u679c\u4eca\u5929\u662f 2022 \u5e74 5 \u6708 20 \u65e5\uff0c\u60a8\u60f3\u5f97\u5230 100 \u5929\u524d\u7684\u4ea4\u6613\u65e5\uff0c\u5219\u53ef\u4ee5\u4f7f\u7528 day_shift: 1 2 3 4 from omicron import tf dt = datetime . date ( 2022 , 5 , 20 ) tf . day_shift ( dt , - 100 ) \u8f93\u51fa\u662f datetime.date(2021, 12, 16)\u3002\u5728\u8fd9\u91cc\uff0cday_shift \u7684\u7b2c\u4e8c\u4e2a\u53c2\u6570 n \u662f\u504f\u79fb\u91cf\uff0c\u5f53\u5b83\u5c0f\u4e8e\u96f6\u65f6\uff0c\u662f\u627e dt \u524d n \u4e2a\u4ea4\u6613\u65e5\uff1b\u5f53\u5b83\u5927\u4e8e\u96f6\u65f6\uff0c\u662f\u627e dt \u4e4b\u540e\u7684 n \u4e2a\u4ea4\u6613\u65e5\u3002 \u6bd4\u5982\u6709\u610f\u601d\u7684\u662f n == 0 \u7684\u65f6\u5019\u3002\u5bf9\u4e0a\u8ff0 dt \uff0cday_shift(dt, 0) \u5f97\u5230\u7684\u4ecd\u7136\u662f\u540c\u4e00\u5929\uff0c\u4f46\u5982\u679c dt \u662f 2022 \u5e74 5 \u6708 21 \u65e5\u662f\u5468\u516d\uff0c\u5219 day_shift(datetime.date(2022, 5, 21)) \u5c06\u8fd4\u56de 2022 \u5e74 5 \u6708 20 \u65e5\u3002\u56e0\u4e3a 5 \u6708 21 \u65e5\u8fd9\u4e00\u5929\u662f\u5468\u516d\uff0c\u4e0d\u662f\u4ea4\u6613\u65e5\uff0cday_shift \u5c06\u8fd4\u56de\u5176\u5bf9\u5e94\u7684\u4ea4\u6613\u65e5\uff0c\u8fd9\u5728\u591a\u6570\u60c5\u51b5\u4e0b\u4f1a\u975e\u5e38\u65b9\u4fbf\u3002 \u9664\u4e86 day_shift \u5916\uff0ctimeframe \u8fd8\u63d0\u4f9b\u4e86\u7c7b\u4f3c\u51fd\u6570\u6bd4\u5982 week_shift \u7b49\u3002\u4e00\u822c\u5730\uff0c\u60a8\u53ef\u4ee5\u7528 shift(dt, n, frame_type) \u6765\u5bf9\u4efb\u610f\u652f\u6301\u7684\u65f6\u95f4\u8fdb\u884c\u504f\u79fb\u3002 2.2.2. 
\u8fb9\u754c\u64cd\u4f5c ceiling \u548c floor \u00b6 \u5f88\u591a\u65f6\u5019\u6211\u4eec\u9700\u8981\u77e5\u9053\u5177\u4f53\u7684\u67d0\u4e2a\u65f6\u95f4\u70b9 (moment) \u6240\u5c5e\u7684\u5e27\u3002\u5982\u679c\u8981\u53d6\u5176\u4e0a\u4e00\u5e27\uff0c\u5219\u53ef\u4ee5\u7528 floor \u64cd\u4f5c\uff0c\u53cd\u4e4b\uff0c\u4f7f\u7528 ceiling\u3002 1 2 tf . ceiling ( datetime . date ( 2005 , 1 , 4 ), FrameType . WEEK ) # OUTPUT IS DATETIME.DATE(2005, 1, 7) 2.2.3. \u65f6\u95f4\u8f6c\u6362 \u00b6 \u4e3a\u4e86\u52a0\u5feb\u901f\u5ea6\uff0c\u4ee5\u53ca\u65b9\u4fbf\u6301\u4e45\u5316\u5b58\u50a8\uff0c\u5728 timeframe \u5185\u90e8\uff0c\u6709\u65f6\u5019\u4f7f\u7528\u6574\u6570\u6765\u8868\u793a\u65f6\u95f4\u3002\u6bd4\u5982 20220502 \u8868\u793a\u7684\u662f 2022 \u5e74 5 \u6708 20 \u65e5\uff0c\u800c 202205220931 \u5219\u8868\u793a 2022 \u5e74 5 \u6708 20 \u65e5 9 \u65f6 31 \u5206\u949f\u3002 \u8fd9\u79cd\u8868\u793a\u6cd5\uff0c\u6709\u65f6\u5019\u8981\u6c42\u6211\u4eec\u8fdb\u884c\u4e00\u4e9b\u8f6c\u6362\uff1a 1 2 3 4 5 6 7 8 9 10 # \u5c06\u6574\u6570\u8868\u793a\u7684\u65e5\u671f\u8f6c\u6362\u4e3a\u65e5\u671f tf . int2date ( 20220522 ) # datetime.date(2022, 5, 22) # \u5c06\u6574\u6570\u8868\u793a\u7684\u65f6\u95f4\u8f6c\u6362\u4e3a\u65f6\u95f4 tf . int2time ( 202205220931 ) # datetime.datetime(2022, 5, 22, 9, 31) # \u5c06\u65e5\u671f\u8f6c\u6362\u6210\u4e3a\u6574\u6570 tf . date2int ( datetime . date ( 2022 , 5 , 22 )) # 20220520 # \u5c06\u65f6\u95f4\u8f6c\u6362\u6210\u4e3a\u65f6\u95f4 tf . date2time ( datetime . datetime ( 2022 , 5 , 22 , 9 , 21 )) # 202205220921 2.2.4. \u5217\u51fa\u533a\u95f4\u5185\u7684\u6240\u6709\u65f6\u95f4\u5e27 \u00b6 \u6709\u65f6\u5019\u6211\u4eec\u9700\u8981\u5f97\u5230 start \u548c end \u4e4b\u95f4\u67d0\u4e2a\u65f6\u95f4\u5e27\u7c7b\u578b\u7684\u6240\u6709\u65f6\u95f4\u5e27\uff1a 1 2 3 4 start = arrow . get ( '2020-1-13 10:00' ) . naive end = arrow . get ( '2020-1-13 13:30' ) . naive tf . get_frames ( start , end , FrameType . MIN30 ) [ 202001131000 , 202001131030 , 202001131100 , 202001131130 , 202001131330 ] Important \u4e0a\u9762\u7684\u793a\u4f8b\u4e2d\uff0c\u51fa\u73b0\u4e86\u53ef\u80fd\u60a8\u4e0d\u592a\u719f\u6089\u7684 naive \u5c5e\u6027\u3002\u5b83\u6307\u7684\u662f\u53d6\u4e0d\u5e26\u65f6\u533a\u7684\u65f6\u95f4\u3002\u5728 python \u4e2d\uff0c\u65f6\u95f4\u53ef\u4ee5\u5e26\u65f6\u533a\uff08timezone-aware) \u548c\u4e0d\u5e26\u65f6\u533a (naive)\u3002 \u5982\u679c\u60a8\u4f7f\u7528 datetime.datetime(2022, 5, 20)\uff0c\u5b83\u5c31\u662f\u4e0d\u5e26\u65f6\u533a\u7684\uff0c\u9664\u975e\u60a8\u4e13\u95e8\u6307\u5b9a\u65f6\u533a\u3002 \u5728 omicron \u4e2d\uff0c\u6211\u4eec\u5728\u7edd\u5927\u591a\u6570\u60c5\u51b5\u4e0b\uff0c\u4ec5\u4f7f\u7528 naive \u8868\u793a\u7684\u65f6\u95f4\uff0c\u5373\u4e0d\u5e26\u65f6\u533a\uff0c\u5e76\u4e14\u5047\u5b9a\u65f6\u533a\u4e3a\u4e1c\u516b\u533a\uff08\u5373\u5317\u4eac\u65f6\u95f4\uff09\u3002 \u5982\u679c\u60a8\u53ea\u77e5\u9053\u7ed3\u675f\u65f6\u95f4\uff0c\u9700\u8981\u5411\u524d\u53d6 n \u4e2a\u65f6\u95f4\u5e27\uff0c\u5219\u53ef\u4ee5\u4f7f\u7528 get_frames_by_count \u3002 \u5982\u679c\u60a8\u53ea\u662f\u9700\u8981\u77e5\u9053\u5728 start \u548c end \u4e4b\u95f4\uff0c\u603b\u5171\u6709\u591a\u5c11\u4e2a\u5e27\uff0c\u8bf7\u4f7f\u7528 count_frames : 1 2 3 start = datetime . date ( 2019 , 12 , 21 ) end = datetime . date ( 2019 , 12 , 21 ) tf . count_frames ( start , end , FrameType . 
DAY ) \u8f93\u51fa\u5c06\u662f 1\u3002\u4e0a\u8ff0\u65b9\u6cd5\u8fd8\u6709\u4e00\u4e2a\u5feb\u6377\u65b9\u6cd5\uff0c\u5373 count_day_frames \uff0c\u5e76\u4e14\uff0c\u5bf9 week, month, quaters \u4e5f\u662f\u4e00\u6837\u3002 2.3. \u8bfb\u53d6\u884c\u60c5\u6570\u636e \u00b6 \u73b0\u5728\uff0c\u8ba9\u6211\u4eec\u6765\u83b7\u53d6\u4e00\u6bb5\u884c\u60c5\u6570\u636e\uff1a 1 2 3 4 code = \"000001.XSHE\" end = datetime . date ( 2022 , 5 , 20 ) bars = await Stock . get_bars ( code , 10 , FrameType . DAY , end ) \u8fd4\u56de\u7684 bars \u5c06\u662f\u4e00\u4e2a numpy structured array, \u5176\u7c7b\u578b\u4e3a bars_dtype \u3002\u4e00\u822c\u5730\uff0c\u5b83\u5305\u62ec\u4e86\u4ee5\u4e0b\u5b57\u6bb5\uff1a 1 2 3 4 5 6 7 8 * frame\uff08\u5e27\uff09 * open\uff08\u5f00\u76d8\u4ef7\uff09 * high\uff08\u6700\u9ad8\u4ef7\uff09 * low\uff08\u6700\u4f4e\u4ef7\uff09 * close\uff08\u6536\u76d8\u4ef7\uff09 * volume\uff08\u6210\u4ea4\u91cf\uff0c\u80a1\u6570\uff09 * amount\uff08\u6210\u4ea4\u989d\uff09 * factor\uff08\u590d\u6743\u56e0\u5b50\uff09 \u7f3a\u7701\u60c5\u51b5\u4e0b\uff0c\u8fd4\u56de\u7684\u6570\u636e\u662f\u5230 end \u4e3a\u6b62\u7684\u524d\u590d\u6743\u6570\u636e\u3002\u4f60\u53ef\u4ee5\u901a\u53c2\u6570 fq = False \u5173\u95ed\u5b83\uff0c\u6765\u83b7\u5f97\u4e0d\u590d\u6743\u6570\u636e\uff0c\u5e76\u4ee5\u6b64\u81ea\u884c\u8ba1\u7b97\u540e\u590d\u6743\u6570\u636e\u3002 \u5982\u679c\u8981\u83b7\u53d6\u67d0\u4e2a\u65f6\u95f4\u6bb5\u7684\u6570\u636e\uff0c\u53ef\u4ee5\u4f7f\u7528 get_bars_in_range \u3002 \u4e0a\u8ff0\u65b9\u6cd5\u603b\u662f\u5c3d\u6700\u5927\u53ef\u80fd\u8fd4\u56de\u5b9e\u65f6\u6570\u636e\uff0c\u5982\u679c end \u4e3a\u5f53\u524d\u65f6\u95f4\u7684\u8bdd\uff0c\u4f46\u7531\u4e8e omega \u540c\u6b65\u5ef6\u65f6\u662f\u4e00\u5206\u949f\uff0c\u6240\u4ee5\u884c\u60c5\u6570\u636e\u6700\u591a\u53ef\u80fd\u6162\u4e00\u5206\u949f\u3002\u5982\u679c\u8981\u83b7\u53d6\u66f4\u5b9e\u65f6\u7684\u6570\u636e\uff0c\u53ef\u4ee5\u901a\u8fc7 get_latest_price \u65b9\u6cd5\u3002 \u8981\u83b7\u6da8\u8dcc\u505c\u4ef7\u683c\u548c\u6807\u5fd7\uff0c\u8bf7\u4f7f\u7528: get_trade_price_limits trade_price_limits_flags trade_price_limit_flags_ex 2.4. \u677f\u5757\u6570\u636e \u00b6 \u63d0\u4f9b\u540c\u82b1\u987a\u677f\u5757\u884c\u4e1a\u677f\u5757\u548c\u6982\u5ff5\u677f\u5757\u6570\u636e\u3002\u5728\u4f7f\u7528\u672c\u6a21\u5757\u4e4b\u524d\uff0c\u9700\u8981\u8fdb\u884c\u521d\u59cb\u5316\uff1a 1 2 3 # \u8bf7\u5148\u8fdb\u884comicron\u521d\u59cb\u5316\uff0c\u7565 from omicron.models.board import Board , BoardType Board . init ( '192.168.100.101' ) \u6b64\u5904\u7684IP\u4e3a\u5b89\u88c5omega\u670d\u52a1\u5668\u7684ip\u3002 \u901a\u8fc7 board_list \u6765\u67e5\u8be2\u6240\u6709\u7684\u677f\u5757\u3002 \u5176\u5b83\u65b9\u6cd5\u8bf7\u53c2\u770b API\u6587\u6863 3. 
\u7b56\u7565\u7f16\u5199 \u00b6 omicron \u901a\u8fc7 strategy \u6765\u63d0\u4f9b\u7b56\u7565\u6846\u67b6\u3002\u901a\u8fc7\u8be5\u6846\u67b6\u7f16\u5199\u7684\u7b56\u7565\uff0c\u53ef\u4ee5\u5728\u5b9e\u76d8\u548c\u56de\u6d4b\u4e4b\u95f4\u65e0\u7f1d\u8f6c\u6362 -- \u6839\u636e\u521d\u59cb\u5316\u65f6\u4f20\u5165\u7684\u670d\u52a1\u5668\u4e0d\u540c\u800c\u81ea\u52a8\u5207\u6362\u3002 omicron \u63d0\u4f9b\u4e86\u4e00\u4e2a\u7b80\u5355\u7684 \u53cc\u5747\u7ebf\u7b56\u7565 \u4f5c\u4e3a\u7b56\u7565\u7f16\u5199\u7684\u793a\u8303\uff0c\u53ef\u7ed3\u5408\u5176\u6e90\u7801\uff0c\u4ee5\u53ca\u672c\u6587\u6863\u4e2d\u7684 \u5b8c\u6574\u7b56\u7565\u793a\u4f8b \u5728notebook\u4e2d\u8fd0\u884c\u67e5\u770b\u3002 \u7b56\u7565\u6846\u67b6\u63d0\u4f9b\u4e86\u56de\u6d4b\u9a71\u52a8\u903b\u8f91\u53ca\u4e00\u4e9b\u57fa\u672c\u51fd\u6570\u3002\u8981\u7f16\u5199\u81ea\u5df1\u7684\u7b56\u7565\uff0c\u60a8\u9700\u8981\u4ece\u57fa\u7c7b BaseStrategy \u6d3e\u751f\u51fa\u81ea\u5df1\u7684\u5b50\u7c7b\uff0c\u5e76\u6539\u5199\u5b83\u7684 predict \u65b9\u6cd5\u6765\u5b9e\u73b0\u8c03\u4ed3\u6362\u80a1\u3002 \u7b56\u7565\u6846\u67b6\u4f9d\u8d56\u4e8e zillionare-trader-client \uff0c\u5728\u56de\u6d4b\u65f6\uff0c\u9700\u8981\u6709 zillionare-backtesting \u63d0\u4f9b\u56de\u6d4b\u670d\u52a1\u3002\u5728\u5b9e\u76d8\u65f6\uff0c\u9700\u8981 zilllionare-gm-adaptor \u6216\u8005\u5176\u5b83\u5b9e\u76d8\u4ea4\u6613\u7f51\u5173\u63d0\u4f9b\u670d\u52a1\u3002 \u7b56\u7565\u4ee3\u7801\u53ef\u4ee5\u4e0d\u52a0\u4fee\u6539\uff0c\u5373\u53ef\u4f7f\u7528\u4e8e\u56de\u6d4b\u548c\u5b9e\u76d8\u4e24\u79cd\u573a\u666f\u3002 3.1. \u56de\u6d4b\u573a\u666f \u00b6 \u5b9e\u73b0\u7b56\u7565\u56de\u6d4b\uff0c\u4e00\u822c\u9700\u8981\u8fdb\u884c\u4ee5\u4e0b\u6b65\u9aa4\uff1a 1. \u4ece\u6b64\u57fa\u7c7b\u6d3e\u751f\u51fa\u4e00\u4e2a\u7b56\u7565\u5b50\u7c7b\uff0c\u6bd4\u5982sma.py 2. \u5b50\u7c7b\u9700\u8981\u91cd\u8f7d predict \u65b9\u6cd5\uff0c\u6839\u636e\u5f53\u524d\u4f20\u5165\u7684\u65f6\u95f4\u5e27\u548c\u5e27\u7c7b\u578b\u53c2\u6570\uff0c\u83b7\u53d6\u6570\u636e\u5e76\u8fdb\u884c\u5904\u7406\uff0c\u8bc4\u4f30\u51fa\u4ea4\u6613\u4fe1\u53f7\u3002 3. \u5b50\u7c7b\u6839\u636e\u4ea4\u6613\u4fe1\u53f7\uff0c\u5728 predict \u65b9\u6cd5\u91cc\uff0c\u8c03\u7528\u57fa\u7c7b\u7684 buy \u548c sell \u65b9\u6cd5\u6765\u8fdb\u884c\u4ea4\u6613 4. \u751f\u6210\u7b56\u7565\u5b9e\u4f8b\uff0c\u901a\u8fc7\u5b9e\u4f8b\u8c03\u7528 backtest \u65b9\u6cd5\u6765\u8fdb\u884c\u56de\u6d4b\uff0c\u8be5\u65b9\u6cd5\u5c06\u6839\u636e\u7b56\u7565\u6784\u5efa\u65f6\u6307\u5b9a\u7684\u56de\u6d4b\u8d77\u59cb\u65f6\u95f4\u3001\u7ec8\u6b62\u65f6\u95f4\u3001\u5e27\u7c7b\u578b\uff0c\u9010\u5e27\u751f\u6210\u5404\u4e2a\u65f6\u95f4\u5e27\uff0c\u5e76\u8c03\u7528\u5b50\u7c7b\u7684 predict \u65b9\u6cd5\u3002\u5982\u679c\u8c03\u7528\u65f6\u6307\u5b9a\u4e86 portfolio \u548c min_bars \u53c2\u6570\uff0c backtest \u8fd8\u5c06\u8fdb\u884c\u6570\u636e\u9884\u53d6\uff0c\u5e76\u5c06\u622a\u6b62\u5230\u5f53\u524d\u56de\u6d4b\u5e27\u65f6\u7684\u6570\u636e\u4f20\u5165\u3002 4. \u5728\u4ea4\u6613\u7ed3\u675f\u65f6\uff0c\u8c03\u7528 plot_metrics \u65b9\u6cd5\u6765\u83b7\u53d6\u5982\u4e0b\u6240\u793a\u7684\u56de\u6d4b\u6307\u6807\u56fe \u5982\u4f55\u6d3e\u751f\u5b50\u7c7b\uff0c\u53ef\u4ee5\u53c2\u8003 sma \u6e90\u4ee3\u7801\u3002 1 2 3 4 5 6 7 8 9 10 from omicron.strategy.sma import SMAStrategy sma = SMAStrategy ( url = \"\" , # the url of either backtest server, or trade server is_backtest = True , start = datetime . date ( 2023 , 2 , 3 ), end = datetime . date ( 2023 , 4 , 28 ), frame_type = FrameType . 
DAY , ) await sma . backtest ( portfolio = [ \"600000.XSHG\" , min_bars = 20 ]) \u5728\u56de\u6d4b\u65f6\uff0c\u5fc5\u987b\u8981\u6307\u5b9a is_backtest=True \u548c start , end \u53c2\u6570\u3002 3.2. \u56de\u6d4b\u62a5\u544a \u00b6 \u5728\u56de\u6d4b\u7ed3\u675f\u540e\uff0c\u53ef\u4ee5\u901a\u8fc7\u4ee5\u4e0b\u65b9\u6cd5\uff0c\u5728notebook\u4e2d\u7ed8\u5236\u56de\u6d4b\u62a5\u544a\uff1a 1 await sma . plot_metrics () \u8fd9\u5c06\u7ed8\u5236\u51fa\u7c7b\u4f3c\u4ee5\u4e0b\u56fe\uff1a 3.2.1. \u5728\u56de\u6d4b\u62a5\u544a\u4e2d\u6dfb\u52a0\u6280\u672f\u6307\u6807 \u00b6 Info Since 2.0.0.a76 \u9996\u5148\uff0c\u6211\u4eec\u53ef\u4ee5\u5728\u7b56\u7565\u7c7b\u7684predict\u65b9\u6cd5\u4e2d\u8ba1\u7b97\u51fa\u6280\u672f\u6307\u6807\uff0c\u5e76\u4fdd\u5b58\u5230\u6210\u5458\u53d8\u91cf\u4e2d\u3002\u5728\u4e0b\u9762\u7684\u793a\u4f8b\u4ee3\u7801\u4e2d\uff0c\u6211\u4eec\u5c06\u6280\u672f\u6307\u6807\u53ca\u5f53\u65f6\u7684\u65f6\u95f4\u4fdd\u5b58\u5230\u4e86\u4e00\u4e2aindicators\u6570\u7ec4\u4e2d\uff08\u6ce8\u610f\u987a\u5e8f\uff01\uff09\uff0c\u7136\u540e\u5728\u56de\u6d4b\u7ed3\u675f\u540e\uff0c\u5728\u8c03\u7528 plot_metrics\u65f6\uff0c\u5c06\u5176\u4f20\u5165\u5373\u53ef\u3002 1 2 3 4 5 6 7 indicators = [ (datetime.date(2021, 2, 3), 20.1), (datetime.date(2021, 2, 4), 20.2), ..., (datetime.date(2021, 4, 1), 20.3) ] await sma.plot_metrics(indicator) \u65f6\u95f4\u53ea\u80fd\u4f7f\u7528\u4e3b\u5468\u671f\u7684\u65f6\u95f4\uff0c\u5426\u5219\u53ef\u80fd\u4ea7\u751f\u65e0\u6cd5\u4e0e\u5750\u6807\u8f74\u5bf9\u9f50\u7684\u60c5\u51b5\u3002 \u52a0\u5165\u7684\u6307\u6807\u9ed8\u8ba4\u53ea\u663e\u793a\u5728legend\u4e2d\uff0c\u5982\u679c\u8981\u663e\u793a\u5728\u4e3b\u56fe\u4e0a\uff0c\u9700\u8981\u70b9\u51fblegend\u8fdb\u884c\u663e\u793a\u3002 \u6307\u6807\u9664\u53ef\u4ee5\u53e0\u52a0\u5728\u4e3b\u56fe\u4e0a\u4e4b\u5916\uff0c\u8fd8\u4f1a\u51fa\u73b0\u5728\u57fa\u51c6\u7ebf\u7684hoverinfo\u4e2d\uff08\u5373\u4f7f\u6307\u6807\u7684\u8ba1\u7b97\u4e0e\u57fa\u51c6\u7ebf\u65e0\u5173\uff09\uff0c\u53c2\u89c1\u4e0a\u56fe\u4e2d\u7684\u201c\u6307\u6807\u201d\u884c\u3002 3.3. \u4f7f\u7528\u6570\u636e\u9884\u53d6 \u00b6 Info since version 2.0.0-alpha76 \u5728\u56de\u6d4b\u4e2d\uff0c\u53ef\u4ee5\u4f7f\u7528\u4e3b\u5468\u671f\u7684\u6570\u636e\u9884\u53d6\uff0c\u4ee5\u52a0\u5feb\u56de\u6d4b\u901f\u5ea6\u3002\u5de5\u4f5c\u539f\u7406\u5982\u4e0b\uff1a \u5982\u679c\u7b56\u7565\u5728\u8c03\u7528 backtest \u65f6\u4f20\u5165\u4e86 portfolio \u53ca min_bars \u53c2\u6570\uff0c\u5219 backtest \u5c06\u4f1a\u5728\u56de\u6d4b\u4e4b\u524d\uff0c\u9884\u53d6\u4ece[start - min_bars * frame_type, end]\u95f4\u7684portfolio\u884c\u60c5\u6570\u636e\uff0c\u5e76\u5728\u6bcf\u6b21\u8c03\u7528 predict \u65b9\u6cd5\u65f6\uff0c\u901a\u8fc7 barss \u53c2\u6570\uff0c\u5c06[start - min_bars * frame_type, start + i * frame_type]\u95f4\u7684\u6570\u636e\u4f20\u7ed9 predict \u65b9\u6cd5\u3002\u4f20\u5165\u7684\u6570\u636e\u5df2\u8fdb\u884c\u524d\u590d\u6743\u3002 \u5982\u679c\u5728\u56de\u6d4b\u8fc7\u7a0b\u4e2d\uff0c\u9700\u8981\u5077\u770b\u672a\u6765\u6570\u636e\uff0c\u53ef\u4ee5\u4f7f\u7528peek\u65b9\u6cd5\u3002 3.4. 
\u5b8c\u6574SMA\u56de\u6d4b\u793a\u4f8b \u00b6 \u4ee5\u4e0b\u7b56\u7565\u9700\u8981\u5728notebook\u4e2d\u8fd0\u884c\uff0c\u5e76\u4e14\u9700\u8981\u4e8b\u5148\u5b89\u88c5omega\u670d\u52a1\u5668\u540c\u6b65\u6570\u636e\uff0c\u5e76\u6b63\u786e\u914d\u7f6eomicron\u3002 \u8be5\u793a\u4f8b\u5728\u300a\u5927\u5bcc\u7fc1\u91cf\u5316\u8bfe\u7a0b\u300b\u8bfe\u4ef6\u73af\u5883\u4e0b\u53ef\u8fd0\u884c\u3002 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 import cfg4py import omicron import datetime from omicron.strategy.sma import SMAStrategy from coretypes import FrameType cfg = cfg4py . init ( \"/etc/zillionare\" ) await omicron . init () sec = \"600000.XSHG\" start = datetime . date ( 2022 , 1 , 4 ) end = datetime . date ( 2023 , 1 , 1 ) sma = SMAStrategy ( sec , url = cfg . backtest . url , is_backtest = True , start = start , end = end , frame_type = FrameType . DAY ) await sma . backtest ( portfolio = [ sec ], min_bars = 10 , stop_on_error = False ) await sma . plot_metrics ( sma . indicators ) 3.5. \u5b9e\u76d8 \u00b6 \u5728\u5b9e\u76d8\u73af\u5883\u4e0b\uff0c\u4f60\u8fd8\u9700\u8981\u5728\u5b50\u7c7b\u4e2d\u52a0\u5165\u5468\u671f\u6027\u4efb\u52a1(\u6bd4\u5982\u6bcf\u5206\u949f\u6267\u884c\u4e00\u6b21\uff09\uff0c\u5728\u8be5\u4efb\u52a1\u4e2d\u8c03\u7528 predict \u65b9\u6cd5\u6765\u5b8c\u6210\u4ea4\u6613\uff0c\u5982\u4ee5\u4e0b\u793a\u4f8b\u6240\u793a\uff1a 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 import cfg4py import omicron import datetime from omicron.strategy.sma import SMAStrategy from coretypes import FrameType from apscheduler.schedulers.asyncio import AsyncIOScheduler cfg = cfg4py . init ( \"/etc/zillionare\" ) await omicron . init () async def daily_job (): sma = SMAStrategy ( sec , url = cfg . traderserver . url , is_backtest = False , frame_type = FrameType . DAY ) bars = await Stock . get_bars ( sma . _sec , 20 , FrameType . DAY ) await sma . predict ( barss = { sma . _sec : bars }) async def main (): scheduler = AsyncIOScheduler () scheduler . add_job ( daily_job , 'cron' , hour = 14 , minute = 55 ) scheduler . start () \u7b56\u7565\u4ee3\u7801\u65e0\u987b\u4fee\u6539\u3002 \u8be5\u7b56\u7565\u5c06\u81ea\u52a8\u5728\u6bcf\u5929\u768414\uff1a55\u8fd0\u884c\uff0c\u4ee5\u5224\u65ad\u662f\u5426\u8981\u8fdb\u884c\u8c03\u4ed3\u6362\u80a1\u3002\u60a8\u9700\u8981\u989d\u5916\u5224\u65ad\u5f53\u5929\u662f\u5426\u4e3a\u4ea4\u6613\u65e5\u3002 4. \u7ed8\u56fe \u00b6 omicron \u901a\u8fc7 Candlestick \u63d0\u4f9b\u4e86 k \u7ebf\u7ed8\u5236\u529f\u80fd\u3002\u9ed8\u8ba4\u5730\uff0c\u5b83\u5c06\u7ed8\u5236\u4e00\u5e45\u663e\u793a 120 \u4e2a bar\uff0c\u53ef\u62d6\u52a8\uff08\u4ee5\u52a0\u8f7d\u66f4\u591a bar)\uff0c\u5e76\u4e14\u53ef\u4ee5\u53e0\u52a0\u526f\u56fe\u3001\u4e3b\u56fe\u53e0\u52a0\u5404\u79cd\u6307\u6807\u7684 k \u7ebf\u56fe\uff1a \u4e0a\u56fe\u663e\u793a\u4e86\u81ea\u52a8\u68c0\u6d4b\u51fa\u6765\u7684\u5e73\u53f0\u3002\u6b64\u5916\uff0c\u8fd8\u53ef\u4ee5\u8fdb\u884c\u9876\u5e95\u81ea\u52a8\u68c0\u6d4b\u548c\u6807\u6ce8\u3002 Note \u901a\u8fc7\u6307\u5b9a width \u53c2\u6570\uff0c\u53ef\u4ee5\u5f71\u54cd\u521d\u59cb\u52a0\u8f7d\u7684bar\u7684\u6570\u91cf\u3002 omicron \u901a\u8fc7 metris \u63d0\u4f9b\u56de\u6d4b\u62a5\u544a\u3002\u8be5\u62a5\u544a\u7c7b\u4f3c\u4e8e\uff1a \u5b83\u540c\u6837\u63d0\u4f9b\u53ef\u62d6\u52a8\u7684\u7ed8\u56fe\uff0c\u5e76\u4e14\u5728\u4e70\u5356\u70b9\u4e0a\u53ef\u4ee5\u901a\u8fc7\u9f20\u6807\u60ac\u505c\uff0c\u663e\u793a\u4e70\u5356\u70b9\u4fe1\u606f\u3002 omicron \u7684\u7ed8\u56fe\u529f\u80fd\u53ea\u80fd\u5728 notebook \u4e2d\u4f7f\u7528\u3002 5. 
\u8bc4\u4f30\u6307\u6807 \u00b6 omicron \u63d0\u4f9b\u4e86 mean_absolute_error \u51fd\u6570\u548c pct_error \u51fd\u6570\u3002\u5b83\u4eec\u5728 scipy \u6216\u8005\u5176\u5b83\u5e93\u4e2d\u4e5f\u80fd\u627e\u5230\uff0c\u4e3a\u4e86\u65b9\u4fbf\u4e0d\u719f\u6089\u8fd9\u4e9b\u7b2c\u4e09\u65b9\u5e93\u7684\u4f7f\u7528\u8005\uff0c\u6211\u4eec\u5185\u7f6e\u4e86\u8fd9\u4e2a\u5e38\u6307\u6807\u3002 \u5bf9\u4e00\u4e9b\u5e38\u89c1\u7684\u7b56\u7565\u8bc4\u4f30\u51fd\u6570\uff0c\u6211\u4eec\u5f15\u7528\u4e86 empyrical \u4e2d\u7684\u76f8\u5173\u51fd\u6570\uff0c\u6bd4\u5982 alpha, beta, shapre_ratio\uff0c calmar_ratio \u7b49\u3002 6. TALIB \u5e93 \u00b6 \u60a8\u5e94\u8be5\u628a\u8fd9\u91cc\u63d0\u4f9b\u7684\u51fd\u6570\u5f53\u6210\u5b9e\u9a8c\u6027\u7684\u3002\u8fd9\u4e9b API \u4e5f\u53ef\u80fd\u5728\u67d0\u5929\u88ab\u5e9f\u5f03\u3001\u91cd\u547d\u540d\u3001\u4fee\u6539\uff0c\u6216\u8005\u8fd9\u4e9b API \u5e76\u6ca1\u6709\u591a\u5927\u4f5c\u7528\uff0c\u6216\u8005\u5b83\u4eec\u7684\u5b9e\u73b0\u5b58\u5728\u9519\u8bef\u3002 \u4f46\u662f\uff0c\u5982\u679c\u6211\u4eec\u5c06\u6765\u4f1a\u629b\u5f03\u8fd9\u4e9b API \u7684\u8bdd\uff0c\u6211\u4eec\u4e00\u5b9a\u4f1a\u901a\u8fc7 depracted \u65b9\u6cd5\u63d0\u524d\u8fdb\u884c\u8b66\u544a\u3002 7. \u6269\u5c55 \u00b6 Python\u5f53\u4e2d\u7684\u56db\u820d\u4e94\u5165\u7528\u4e8e\u8bc1\u5238\u6295\u8d44\uff0c\u4f1a\u5e26\u6765\u4e25\u91cd\u7684\u95ee\u9898\uff0c\u6bd4\u5982\uff0c\u50cf round(0.3/2) \uff0c\u6211\u4eec\u671f\u671b\u5f97\u5230 0.2 \uff0c\u4f46\u5b9e\u9645\u4e0a\u4f1a\u5f97\u5230 0.1 \u3002\u8fd9\u79cd\u8bef\u5dee\u4e00\u65e6\u53d1\u751f\u6210\u5728\u4e00\u4e9b\u4f4e\u4ef7\u80a1\u8eab\u4e0a\uff0c\u5c06\u4f1a\u5e26\u6765\u975e\u5e38\u5927\u7684\u4e0d\u786e\u5b9a\u6027\u3002\u6bd4\u5982\uff0c1.945\u4fdd\u7559\u4e24\u4f4d\u5c0f\u6570\uff0c\u672c\u6765\u5e94\u8be5\u662f1.95\uff0c\u5982\u679c\u88ab\u8bef\u820d\u5165\u4e3a1.94\uff0c\u5219\u8bef\u5dee\u63a5\u8fd10.5%\uff0c\u8fd9\u5bf9\u6295\u8d44\u6765\u8bf4\u662f\u96be\u4ee5\u63a5\u53d7\u7684\u3002 Info \u5982\u679c\u4e00\u5929\u53ea\u8fdb\u884c\u4e00\u6b21\u4ea4\u6613\uff0c\u4e00\u6b21\u4ea4\u6613\u8bef\u5dee\u4e3a0.5%\uff0c\u4e00\u5e74\u7d2f\u79ef\u4e0b\u6765\uff0c\u8bef\u5dee\u5c06\u8fbe\u52302.5\u500d\u3002 \u6211\u4eec\u5728 decimals \u4e2d\u63d0\u4f9b\u4e86\u9002\u7528\u4e8e\u8bc1\u5238\u4ea4\u6613\u9886\u57df\u7684\u7248\u672c\uff0c math_round \u548c\u4ef7\u683c\u6bd4\u8f83\u51fd\u6570 price_equal \u3002 \u6211\u4eec\u8fd8\u5728 np \u4e2d\uff0c\u5bf9numpy\u4e2d\u7f3a\u5931\u7684\u4e00\u4e9b\u529f\u80fd\u8fdb\u884c\u4e86\u8865\u5145\uff0c\u6bd4\u5982 numpy_append_fields , fill_nan \u7b49\u3002","title":"\u4f7f\u7528\u6559\u7a0b"},{"location":"usage/#1-\u914d\u7f6e\u521d\u59cb\u5316\u548c\u5173\u95ed-omicron","text":"Omicron \u4f9d\u8d56\u4e8e zillionare-omega \u670d\u52a1\u6765\u83b7\u53d6\u6570\u636e\u3002\u4f46\u5b83\u5e76\u4e0d\u76f4\u63a5\u4e0e Omega \u670d\u52a1\u901a\u8baf\uff0c\u76f8\u53cd\uff0c\u5b83\u76f4\u63a5\u8bfb\u53d6 Omega \u670d\u52a1\u5668\u4f1a\u5199\u5165\u6570\u636e\u7684 Influxdb \u548c redis \u6570\u636e\u5e93\u3002\u56e0\u6b64\uff0c\u5728\u4f7f\u7528 Omicron \u4e4b\u524d\uff0c\u6211\u4eec\u9700\u8981\u63d0\u4f9b\u8fd9\u4e24\u4e2a\u670d\u52a1\u5668\u7684\u8fde\u63a5\u5730\u5740\uff0c\u5e76\u8fdb\u884c\u521d\u59cb\u5316\u3002","title":"1. 
\u914d\u7f6e\u3001\u521d\u59cb\u5316\u548c\u5173\u95ed OMICRON"},{"location":"usage/#11-\u914d\u7f6e\u548c\u521d\u59cb\u5316","text":"Omicron \u4f7f\u7528 cfg4py \u6765\u7ba1\u7406\u914d\u7f6e\u3002 cfg4py \u4f7f\u7528 yaml \u6587\u4ef6\u6765\u4fdd\u5b58\u914d\u7f6e\u9879\u3002\u5728\u4f7f\u7528 cfg4py \u4e4b\u524d\uff0c\u60a8\u9700\u8981\u5728\u67d0\u5904\u521d\u59cb\u5316 cfg4py\uff0c\u7136\u540e\u518d\u521d\u59cb\u5316 omicron: Tip \u4e3a\u4e86\u7b80\u6d01\u8d77\u89c1\uff0c\u6211\u4eec\u5728\u9876\u5c42\u4ee3\u7801\u4e2d\u76f4\u63a5\u4f7f\u7528\u4e86 async/await\u3002\u901a\u5e38\uff0c\u8fd9\u4e9b\u4ee3\u7801\u80fd\u591f\u76f4\u63a5\u5728 notebook \u4e2d\u8fd0\u884c\uff0c\u4f46\u5982\u679c\u9700\u8981\u5728\u666e\u901a\u7684 python \u811a\u672c\u4e2d\u8fd0\u884c\u8fd9\u4e9b\u4ee3\u7801\uff0c\u60a8\u901a\u5e38\u9700\u8981\u5c06\u5176\u5c01\u88c5\u5230\u4e00\u4e2a\u5f02\u6b65\u51fd\u6570\u4e2d\uff0c\u518d\u901a\u8fc7 asyncio.run \u6765\u8fd0\u884c\u5b83\u3002 1 2 3 4 5 6 7 8 9 import asyncio import cfg4py import omicron async def main (): cfg4py . init ( 'path/to/your/config/dir' ) await omicron . init () # DO YOUR GREAT JOB WITH OMICRON asyncio . run ( main ()) 1 2 3 4 5 import cfg4py import omicron cfg4py . init ( 'path/to/your/config/dir' ) await omicron . init () \u6ce8\u610f\u521d\u59cb\u5316 cfg4py \u65f6\uff0c\u9700\u8981\u63d0\u4f9b\u5305\u542b\u914d\u7f6e\u6587\u4ef6\u7684 \u6587\u4ef6\u5939 \u7684\u8def\u5f84\uff0c\u800c \u4e0d\u662f\u914d\u7f6e\u6587\u4ef6 \u7684\u8def\u5f84\u3002\u914d\u7f6e\u6587\u4ef6\u540d\u5fc5\u987b\u4e3a defaults.yml\u3002 \u60a8\u81f3\u5c11\u5e94\u8be5\u4e3a omicron \u914d\u7f6e Redis \u8fde\u63a5\u4e32\u548c influxdb \u8fde\u63a5\u4e32\u3002\u4e0b\u9762\u662f\u5e38\u7528\u914d\u7f6e\u793a\u4f8b\uff1a 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 # DEFAULTS.YAML redis : dsn : redis://${REDIS_HOST}:${REDIS_PORT} influxdb : url : http://${INFLUXDB_HOST}:${INFLUXDB_PORT} token : ${INFLUXDB_TOKEN} org : ${INFLUXDB_ORG} bucket_name : ${INFLUXDB_BUCKET_NAME} enable_compress : true max_query_size : 150000 notify : mail_from : ${MAIL_FROM} mail_to : - ${MAIL_TO} mail_server : ${MAIL_SERVER} dingtalk_access_token : ${DINGTALK_ACCESS_TOKEN} dingtalk_secret : ${DINGTALK_SECRET} \u8bf7\u6839\u636e\u60a8\u5b9e\u9645\u73af\u5883\u914d\u7f6e\u6765\u66f4\u6539\u4e0a\u8ff0\u6587\u4ef6\u3002\u4e0a\u8ff0\u914d\u7f6e\u4e2d\uff0c${{REDIS_HOST}}\u610f\u5473\u7740\u73af\u5883\u53d8\u91cf\u3002\u5982\u679c\u662f windows\uff0c\u60a8\u9700\u8981\u5728\u7cfb\u7edf > \u73af\u5883\u53d8\u91cf\u4e2d\u8fdb\u884c\u8bbe\u7f6e\u3002\u5982\u679c\u662f Linux \u6216\u8005 Mac\uff0c\u60a8\u9700\u8981\u4fee\u6539.bashrc\uff0c\u4f8b\u5982\uff1a 1 export REDIS_HOST=localhost","title":"1.1. \u914d\u7f6e\u548c\u521d\u59cb\u5316"},{"location":"usage/#12-\u5173\u95ed-omicron","text":"\u5728\u60a8\u7684\u8fdb\u7a0b\u5373\u5c06\u9000\u51fa\u4e4b\u524d\uff0c\u8bf7\u8bb0\u5f97\u5173\u95ed omicron\u3002\u5982\u679c\u60a8\u662f\u5728 notebook \u4e2d\u4f7f\u7528 omicron, \u5219\u53ef\u4ee5\u5ffd\u7565\u6b64\u6b65\u805a\u3002 1 await omicron . close ()","title":"1.2. \u5173\u95ed omicron"},{"location":"usage/#2-\u6570\u636e\u8bfb\u53d6","text":"","title":"2. 
\u6570\u636e\u8bfb\u53d6"},{"location":"usage/#21-\u8bc1\u5238\u5217\u8868","text":"Security \u548c Query \u63d0\u4f9b\u4e86\u8bc1\u5238\u5217\u8868\u548c\u67e5\u8be2\u64cd\u4f5c\u3002\u67e5\u8be2\u88ab\u8bbe\u8ba1\u6210\u4e3a\u94fe\u5f0f API\u3002\u901a\u5e38\uff0c\u6211\u4eec\u901a\u8fc7\u8c03\u7528 Security.select() \u6765\u751f\u6210\u4e00\u4e2a Query \u5bf9\u8c61\uff0c\u7136\u540e\u53ef\u4ee5\u9488\u5bf9\u6b64\u5bf9\u8c61\uff0c\u8fdb\u884c\u5404\u79cd\u8fc7\u67e5\u8be2\u8fc7\u6ee4\uff0c\u6700\u540e\uff0c\u6211\u4eec\u8c03\u7528 query.eval() \u65b9\u6cd5\u7ed3\u675f\u94fe\u5f0f\u8c03\u7528\uff0c\u5e76\u8fd4\u56de\u7ed3\u679c\u3002","title":"2.1. \u8bc1\u5238\u5217\u8868"},{"location":"usage/#211-\u67e5\u8be2\u6240\u6709\u8bc1\u5238\u4ee3\u7801","text":"\u60a8\u53ef\u4ee5\u901a\u8fc7\u4ee5\u4e0b\u65b9\u6cd5\u6765\u83b7\u53d6\u67d0\u4e00\u5929\u7684\u8bc1\u5238\u5217\u8868\uff1a 1 2 3 4 5 6 7 # 4. ASSUME YOU HAVE OMICRON INIT dt = datetime . date ( 2022 , 5 , 20 ) query = Security . select ( dt ) codes = await query . eval () print ( codes ) # THE OUTPUTS IS LIKE [\"000001.XSHE\", \"000004.XSHE\", ...] \u8fd9\u91cc\u7684 dt \u5982\u679c\u6ca1\u6709\u63d0\u4f9b\u7684\u8bdd\uff0c\u5c06\u4f7f\u7528\u6700\u65b0\u7684\u8bc1\u5238\u5217\u8868\u3002\u4f46\u5728\u56de\u6d4b\u4e2d\uff0c\u60a8\u901a\u5e38\u4e0d\u540c\u65f6\u95f4\u7684\u8bc1\u5238\u5217\u8868\uff0c\u56e0\u6b64\uff0c dt \u5728\u8fd9\u79cd\u60c5\u51b5\u4e0b\u662f\u5fc5\u987b\u7684\uff0c\u5426\u5219\uff0c\u60a8\u5c06\u5f15\u5165\u672a\u6765\u6570\u636e\u3002","title":"2.1.1. \u67e5\u8be2\u6240\u6709\u8bc1\u5238\u4ee3\u7801"},{"location":"usage/#212-\u8fd4\u56de\u6240\u6709\u80a1\u7968\u6216\u8005\u6307\u6570","text":"1 2 3 query = Security . select ( dt ) codes = await query . types ([ \"stock\" ]) . eval () print ( codes )","title":"2.1.2. \u8fd4\u56de\u6240\u6709\u80a1\u7968\u6216\u8005\u6307\u6570"},{"location":"usage/#213-\u6392\u9664\u67d0\u79cd\u80a1\u7968\u8bc1\u5238","text":"1 2 3 query = Security . select ( dt ) codes = await query . exclude_st () . exclude_kcb () . exclude_cyb () . eval () print ( codes )","title":"2.1.3. \u6392\u9664\u67d0\u79cd\u80a1\u7968\uff08\u8bc1\u5238\uff09"},{"location":"usage/#214-\u5982\u679c\u53ea\u8981\u6c42\u67d0\u79cd\u80a1\u7968\u8bc1\u5238","text":"1 2 3 4 query = Security . select ( dt ) codes = await query . only_kcb () . only_st () . only_cyb () . eval () print ( codes ) #\u5f97\u5230\u7a7a\u5217\u8868","title":"2.1.4. \u5982\u679c\u53ea\u8981\u6c42\u67d0\u79cd\u80a1\u7968\uff08\u8bc1\u5238\uff09"},{"location":"usage/#215-\u6309\u522b\u540d\u8fdb\u884c\u6a21\u7cca\u67e5\u8be2","text":"A \u80a1\u7684\u8bc1\u5238\u5728\u6807\u8bc6\u4e0a\uff0c\u4e00\u822c\u6709\u4ee3\u7801\uff08code \u6216\u8005 symbol)\u3001\u62fc\u97f3\u7b80\u5199 (name) \u548c\u6c49\u5b57\u8868\u793a\u540d (display_name) \u4e09\u79cd\u6807\u8bc6\u3002\u6bd4\u5982\u4e2d\u56fd\u5e73\u5b89\uff0c\u5176\u4ee3\u7801\u4e3a 601318.XSHG; \u5176\u62fc\u97f3\u7b80\u5199\u4e3a ZGPA\uff1b\u800c\u4e2d\u56fd\u5e73\u5b89\u88ab\u79f0\u4e3a\u5b83\u7684\u522b\u540d ( alias )\u3002 \u5982\u679c\u8981\u67e5\u8be2\u6240\u6709\u4e2d\u5b57\u5934\u7684\u80a1\u7968\uff1a 1 2 3 query = Security . select ( dt ) codes = await query . alias_like ( \"\u4e2d\" ) . eval () print ( codes )","title":"2.1.5. 
\u6309\u522b\u540d\u8fdb\u884c\u6a21\u7cca\u67e5\u8be2"},{"location":"usage/#216-\u901a\u8fc7\u4ee3\u7801\u67e5\u8be2\u5176\u5b83\u4fe1\u606f","text":"\u901a\u8fc7\u524d\u9762\u7684\u67e5\u8be2\u6211\u4eec\u53ef\u4ee5\u5f97\u5230\u4e00\u4e2a\u8bc1\u5238\u5217\u8868\uff0c\u5982\u679c\u8981\u5f97\u5230\u5177\u4f53\u7684\u4fe1\u606f\uff0c\u53ef\u4ee5\u901a\u8fc7 info \u63a5\u53e3\u6765\u67e5\u8be2\uff1a 1 2 3 dt = datetime . date ( 2022 , 5 , 20 ) info = await Security . info ( \"688001.XSHG\" , dt ) print ( info ) \u8f93\u51fa\u4e3a\uff1a 1 2 3 4 5 6 7 8 { ' t ype' : 's t ock' , 'display_ na me' : '\u534e\u5174\u6e90\u521b' , 'alias' : '\u534e\u5174\u6e90\u521b' , 'e n d' : da tet ime.da te ( 2200 , 1 , 1 ) , 's tart ' : da tet ime.da te ( 2019 , 7 , 22 ) , ' na me' : 'HXYC' }","title":"2.1.6. \u901a\u8fc7\u4ee3\u7801\u67e5\u8be2\u5176\u5b83\u4fe1\u606f"},{"location":"usage/#22-\u4ea4\u6613\u65e5\u5386\u53ca\u65f6\u95f4\u5e27\u8ba1\u7b97","text":"Omicron \u4e0d\u4ec5\u63d0\u4f9b\u4e86\u4ea4\u6613\u65e5\u5386\uff0c\u4e0e\u5176\u5b83\u91cf\u5316\u6846\u67b6\u76f8\u6bd4\uff0c\u6211\u4eec\u8fd8\u63d0\u4f9b\u4e86\u4e30\u5bcc\u7684\u4e0e\u65f6\u95f4\u76f8\u5173\u7684\u8fd0\u7b97\u64cd\u4f5c\u3002\u8fd9\u4e9b\u64cd\u4f5c\u90fd\u6709\u8be6\u7ec6\u7684\u6587\u6863\u548c\u793a\u4f8b\uff0c\u60a8\u53ef\u4ee5\u901a\u8fc7 TimeFrame \u6765\u8fdb\u4e00\u6b65\u9605\u8bfb\u3002 omicron \u4e2d\uff0c\u5e38\u5e38\u4f1a\u9047\u5230\u65f6\u95f4\u5e27 (Time Frame) \u8fd9\u4e2a\u6982\u5ff5\u3002\u56e0\u4e3a\u884c\u60c5\u6570\u636e\u90fd\u662f\u6309\u4e00\u5b9a\u7684\u65f6\u95f4\u957f\u5ea6\u7ec4\u7ec7\u7684\uff0c\u6bd4\u5982 5 \u5206\u949f\uff0c1 \u5929\uff0c\u7b49\u7b49\u3002\u56e0\u6b64\uff0c\u5728 omicron \u4e2d\uff0c\u6211\u4eec\u7ecf\u5e38\u4f7f\u7528\u67d0\u4e2a\u65f6\u95f4\u7247\u7ed3\u675f\u7684\u65f6\u95f4\uff0c\u6765\u6807\u8bc6\u8fd9\u4e2a\u65f6\u95f4\u7247\uff0c\u5e76\u5c06\u5176\u79f0\u4e4b\u4e3a\u5e27 (Time Frame)\u3002 omicron \u4e2d\uff0c\u6211\u4eec\u652f\u6301\u7684\u65f6\u95f4\u5e27\u5305\u62ec\u65e5\u5185\u7684\u5206\u949f\u5e27 (FrameType.MIN1), 5 \u5206\u949f\u5e27 (FrameType.MIN5), 15 \u5206\u949f\u5e27\u300130 \u5206\u949f\u5e27\u548c 60 \u5206\u949f\u5e27\uff0c\u4ee5\u53ca\u65e5\u7ebf\u7ea7\u522b\u7684 FrameType.DAY, FrameType.WEEK \u7b49\u3002\u5173\u4e8e\u8be6\u7ec6\u7684\u7c7b\u578b\u8bf4\u660e\uff0c\u8bf7\u53c2\u89c1 coretypes omicron \u63d0\u4f9b\u7684\u4ea4\u6613\u65e5\u5386\u8d77\u59cb\u4e8e 2005 \u5e74 1 \u6708 4 \u65e5\u3002\u63d0\u4f9b\u7684\u884c\u60c5\u6570\u636e\uff0c\u6700\u65e9\u4ece\u8fd9\u4e00\u5929\u8d77\u3002 \u5927\u81f4\u4e0a\uff0comicron \u63d0\u4f9b\u4e86\u4ee5\u4e0b\u65f6\u95f4\u5e27\u64cd\u4f5c\uff1a","title":"2.2. \u4ea4\u6613\u65e5\u5386\u53ca\u65f6\u95f4\u5e27\u8ba1\u7b97"},{"location":"usage/#221-\u4ea4\u6613\u65f6\u95f4\u7684\u504f\u79fb","text":"\u5982\u679c\u4eca\u5929\u662f 2022 \u5e74 5 \u6708 20 \u65e5\uff0c\u60a8\u60f3\u5f97\u5230 100 \u5929\u524d\u7684\u4ea4\u6613\u65e5\uff0c\u5219\u53ef\u4ee5\u4f7f\u7528 day_shift: 1 2 3 4 from omicron import tf dt = datetime . date ( 2022 , 5 , 20 ) tf . 
day_shift ( dt , - 100 ) \u8f93\u51fa\u662f datetime.date(2021, 12, 16)\u3002\u5728\u8fd9\u91cc\uff0cday_shift \u7684\u7b2c\u4e8c\u4e2a\u53c2\u6570 n \u662f\u504f\u79fb\u91cf\uff0c\u5f53\u5b83\u5c0f\u4e8e\u96f6\u65f6\uff0c\u662f\u627e dt \u524d n \u4e2a\u4ea4\u6613\u65e5\uff1b\u5f53\u5b83\u5927\u4e8e\u96f6\u65f6\uff0c\u662f\u627e dt \u4e4b\u540e\u7684 n \u4e2a\u4ea4\u6613\u65e5\u3002 \u6bd4\u5982\u6709\u610f\u601d\u7684\u662f n == 0 \u7684\u65f6\u5019\u3002\u5bf9\u4e0a\u8ff0 dt \uff0cday_shift(dt, 0) \u5f97\u5230\u7684\u4ecd\u7136\u662f\u540c\u4e00\u5929\uff0c\u4f46\u5982\u679c dt \u662f 2022 \u5e74 5 \u6708 21 \u65e5\u662f\u5468\u516d\uff0c\u5219 day_shift(datetime.date(2022, 5, 21)) \u5c06\u8fd4\u56de 2022 \u5e74 5 \u6708 20 \u65e5\u3002\u56e0\u4e3a 5 \u6708 21 \u65e5\u8fd9\u4e00\u5929\u662f\u5468\u516d\uff0c\u4e0d\u662f\u4ea4\u6613\u65e5\uff0cday_shift \u5c06\u8fd4\u56de\u5176\u5bf9\u5e94\u7684\u4ea4\u6613\u65e5\uff0c\u8fd9\u5728\u591a\u6570\u60c5\u51b5\u4e0b\u4f1a\u975e\u5e38\u65b9\u4fbf\u3002 \u9664\u4e86 day_shift \u5916\uff0ctimeframe \u8fd8\u63d0\u4f9b\u4e86\u7c7b\u4f3c\u51fd\u6570\u6bd4\u5982 week_shift \u7b49\u3002\u4e00\u822c\u5730\uff0c\u60a8\u53ef\u4ee5\u7528 shift(dt, n, frame_type) \u6765\u5bf9\u4efb\u610f\u652f\u6301\u7684\u65f6\u95f4\u8fdb\u884c\u504f\u79fb\u3002","title":"2.2.1. \u4ea4\u6613\u65f6\u95f4\u7684\u504f\u79fb"},{"location":"usage/#222-\u8fb9\u754c\u64cd\u4f5c-ceiling-\u548c-floor","text":"\u5f88\u591a\u65f6\u5019\u6211\u4eec\u9700\u8981\u77e5\u9053\u5177\u4f53\u7684\u67d0\u4e2a\u65f6\u95f4\u70b9 (moment) \u6240\u5c5e\u7684\u5e27\u3002\u5982\u679c\u8981\u53d6\u5176\u4e0a\u4e00\u5e27\uff0c\u5219\u53ef\u4ee5\u7528 floor \u64cd\u4f5c\uff0c\u53cd\u4e4b\uff0c\u4f7f\u7528 ceiling\u3002 1 2 tf . ceiling ( datetime . date ( 2005 , 1 , 4 ), FrameType . WEEK ) # OUTPUT IS DATETIME.DATE(2005, 1, 7)","title":"2.2.2. \u8fb9\u754c\u64cd\u4f5c ceiling \u548c floor"},{"location":"usage/#223-\u65f6\u95f4\u8f6c\u6362","text":"\u4e3a\u4e86\u52a0\u5feb\u901f\u5ea6\uff0c\u4ee5\u53ca\u65b9\u4fbf\u6301\u4e45\u5316\u5b58\u50a8\uff0c\u5728 timeframe \u5185\u90e8\uff0c\u6709\u65f6\u5019\u4f7f\u7528\u6574\u6570\u6765\u8868\u793a\u65f6\u95f4\u3002\u6bd4\u5982 20220502 \u8868\u793a\u7684\u662f 2022 \u5e74 5 \u6708 20 \u65e5\uff0c\u800c 202205220931 \u5219\u8868\u793a 2022 \u5e74 5 \u6708 20 \u65e5 9 \u65f6 31 \u5206\u949f\u3002 \u8fd9\u79cd\u8868\u793a\u6cd5\uff0c\u6709\u65f6\u5019\u8981\u6c42\u6211\u4eec\u8fdb\u884c\u4e00\u4e9b\u8f6c\u6362\uff1a 1 2 3 4 5 6 7 8 9 10 # \u5c06\u6574\u6570\u8868\u793a\u7684\u65e5\u671f\u8f6c\u6362\u4e3a\u65e5\u671f tf . int2date ( 20220522 ) # datetime.date(2022, 5, 22) # \u5c06\u6574\u6570\u8868\u793a\u7684\u65f6\u95f4\u8f6c\u6362\u4e3a\u65f6\u95f4 tf . int2time ( 202205220931 ) # datetime.datetime(2022, 5, 22, 9, 31) # \u5c06\u65e5\u671f\u8f6c\u6362\u6210\u4e3a\u6574\u6570 tf . date2int ( datetime . date ( 2022 , 5 , 22 )) # 20220520 # \u5c06\u65f6\u95f4\u8f6c\u6362\u6210\u4e3a\u65f6\u95f4 tf . date2time ( datetime . datetime ( 2022 , 5 , 22 , 9 , 21 )) # 202205220921","title":"2.2.3. \u65f6\u95f4\u8f6c\u6362"},{"location":"usage/#224-\u5217\u51fa\u533a\u95f4\u5185\u7684\u6240\u6709\u65f6\u95f4\u5e27","text":"\u6709\u65f6\u5019\u6211\u4eec\u9700\u8981\u5f97\u5230 start \u548c end \u4e4b\u95f4\u67d0\u4e2a\u65f6\u95f4\u5e27\u7c7b\u578b\u7684\u6240\u6709\u65f6\u95f4\u5e27\uff1a 1 2 3 4 start = arrow . get ( '2020-1-13 10:00' ) . naive end = arrow . get ( '2020-1-13 13:30' ) . naive tf . get_frames ( start , end , FrameType . 
MIN30 ) [ 202001131000 , 202001131030 , 202001131100 , 202001131130 , 202001131330 ] Important \u4e0a\u9762\u7684\u793a\u4f8b\u4e2d\uff0c\u51fa\u73b0\u4e86\u53ef\u80fd\u60a8\u4e0d\u592a\u719f\u6089\u7684 naive \u5c5e\u6027\u3002\u5b83\u6307\u7684\u662f\u53d6\u4e0d\u5e26\u65f6\u533a\u7684\u65f6\u95f4\u3002\u5728 python \u4e2d\uff0c\u65f6\u95f4\u53ef\u4ee5\u5e26\u65f6\u533a\uff08timezone-aware) \u548c\u4e0d\u5e26\u65f6\u533a (naive)\u3002 \u5982\u679c\u60a8\u4f7f\u7528 datetime.datetime(2022, 5, 20)\uff0c\u5b83\u5c31\u662f\u4e0d\u5e26\u65f6\u533a\u7684\uff0c\u9664\u975e\u60a8\u4e13\u95e8\u6307\u5b9a\u65f6\u533a\u3002 \u5728 omicron \u4e2d\uff0c\u6211\u4eec\u5728\u7edd\u5927\u591a\u6570\u60c5\u51b5\u4e0b\uff0c\u4ec5\u4f7f\u7528 naive \u8868\u793a\u7684\u65f6\u95f4\uff0c\u5373\u4e0d\u5e26\u65f6\u533a\uff0c\u5e76\u4e14\u5047\u5b9a\u65f6\u533a\u4e3a\u4e1c\u516b\u533a\uff08\u5373\u5317\u4eac\u65f6\u95f4\uff09\u3002 \u5982\u679c\u60a8\u53ea\u77e5\u9053\u7ed3\u675f\u65f6\u95f4\uff0c\u9700\u8981\u5411\u524d\u53d6 n \u4e2a\u65f6\u95f4\u5e27\uff0c\u5219\u53ef\u4ee5\u4f7f\u7528 get_frames_by_count \u3002 \u5982\u679c\u60a8\u53ea\u662f\u9700\u8981\u77e5\u9053\u5728 start \u548c end \u4e4b\u95f4\uff0c\u603b\u5171\u6709\u591a\u5c11\u4e2a\u5e27\uff0c\u8bf7\u4f7f\u7528 count_frames : 1 2 3 start = datetime . date ( 2019 , 12 , 21 ) end = datetime . date ( 2019 , 12 , 21 ) tf . count_frames ( start , end , FrameType . DAY ) \u8f93\u51fa\u5c06\u662f 1\u3002\u4e0a\u8ff0\u65b9\u6cd5\u8fd8\u6709\u4e00\u4e2a\u5feb\u6377\u65b9\u6cd5\uff0c\u5373 count_day_frames \uff0c\u5e76\u4e14\uff0c\u5bf9 week, month, quaters \u4e5f\u662f\u4e00\u6837\u3002","title":"2.2.4. \u5217\u51fa\u533a\u95f4\u5185\u7684\u6240\u6709\u65f6\u95f4\u5e27"},{"location":"usage/#23-\u8bfb\u53d6\u884c\u60c5\u6570\u636e","text":"\u73b0\u5728\uff0c\u8ba9\u6211\u4eec\u6765\u83b7\u53d6\u4e00\u6bb5\u884c\u60c5\u6570\u636e\uff1a 1 2 3 4 code = \"000001.XSHE\" end = datetime . date ( 2022 , 5 , 20 ) bars = await Stock . get_bars ( code , 10 , FrameType . 
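Section 2.2.4 mentions `get_frames_by_count` and the `count_day_frames` shortcut without showing them. A hedged sketch follows; the argument order is an assumption based on the surrounding text and is not confirmed on this page:

```python
import arrow
import datetime
from coretypes import FrameType
from omicron import tf

end = arrow.get("2020-1-13 13:30").naive
# assumed signature: (end, n, frame_type) -- the n frames ending at `end`
frames = tf.get_frames_by_count(end, 5, FrameType.MIN30)

# assumed shortcut for count_frames(start, end, FrameType.DAY)
n = tf.count_day_frames(datetime.date(2019, 12, 21), datetime.date(2019, 12, 27))
```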
DAY , end ) \u8fd4\u56de\u7684 bars \u5c06\u662f\u4e00\u4e2a numpy structured array, \u5176\u7c7b\u578b\u4e3a bars_dtype \u3002\u4e00\u822c\u5730\uff0c\u5b83\u5305\u62ec\u4e86\u4ee5\u4e0b\u5b57\u6bb5\uff1a 1 2 3 4 5 6 7 8 * frame\uff08\u5e27\uff09 * open\uff08\u5f00\u76d8\u4ef7\uff09 * high\uff08\u6700\u9ad8\u4ef7\uff09 * low\uff08\u6700\u4f4e\u4ef7\uff09 * close\uff08\u6536\u76d8\u4ef7\uff09 * volume\uff08\u6210\u4ea4\u91cf\uff0c\u80a1\u6570\uff09 * amount\uff08\u6210\u4ea4\u989d\uff09 * factor\uff08\u590d\u6743\u56e0\u5b50\uff09 \u7f3a\u7701\u60c5\u51b5\u4e0b\uff0c\u8fd4\u56de\u7684\u6570\u636e\u662f\u5230 end \u4e3a\u6b62\u7684\u524d\u590d\u6743\u6570\u636e\u3002\u4f60\u53ef\u4ee5\u901a\u53c2\u6570 fq = False \u5173\u95ed\u5b83\uff0c\u6765\u83b7\u5f97\u4e0d\u590d\u6743\u6570\u636e\uff0c\u5e76\u4ee5\u6b64\u81ea\u884c\u8ba1\u7b97\u540e\u590d\u6743\u6570\u636e\u3002 \u5982\u679c\u8981\u83b7\u53d6\u67d0\u4e2a\u65f6\u95f4\u6bb5\u7684\u6570\u636e\uff0c\u53ef\u4ee5\u4f7f\u7528 get_bars_in_range \u3002 \u4e0a\u8ff0\u65b9\u6cd5\u603b\u662f\u5c3d\u6700\u5927\u53ef\u80fd\u8fd4\u56de\u5b9e\u65f6\u6570\u636e\uff0c\u5982\u679c end \u4e3a\u5f53\u524d\u65f6\u95f4\u7684\u8bdd\uff0c\u4f46\u7531\u4e8e omega \u540c\u6b65\u5ef6\u65f6\u662f\u4e00\u5206\u949f\uff0c\u6240\u4ee5\u884c\u60c5\u6570\u636e\u6700\u591a\u53ef\u80fd\u6162\u4e00\u5206\u949f\u3002\u5982\u679c\u8981\u83b7\u53d6\u66f4\u5b9e\u65f6\u7684\u6570\u636e\uff0c\u53ef\u4ee5\u901a\u8fc7 get_latest_price \u65b9\u6cd5\u3002 \u8981\u83b7\u6da8\u8dcc\u505c\u4ef7\u683c\u548c\u6807\u5fd7\uff0c\u8bf7\u4f7f\u7528: get_trade_price_limits trade_price_limits_flags trade_price_limit_flags_ex","title":"2.3. \u8bfb\u53d6\u884c\u60c5\u6570\u636e"},{"location":"usage/#24-\u677f\u5757\u6570\u636e","text":"\u63d0\u4f9b\u540c\u82b1\u987a\u677f\u5757\u884c\u4e1a\u677f\u5757\u548c\u6982\u5ff5\u677f\u5757\u6570\u636e\u3002\u5728\u4f7f\u7528\u672c\u6a21\u5757\u4e4b\u524d\uff0c\u9700\u8981\u8fdb\u884c\u521d\u59cb\u5316\uff1a 1 2 3 # \u8bf7\u5148\u8fdb\u884comicron\u521d\u59cb\u5316\uff0c\u7565 from omicron.models.board import Board , BoardType Board . init ( '192.168.100.101' ) \u6b64\u5904\u7684IP\u4e3a\u5b89\u88c5omega\u670d\u52a1\u5668\u7684ip\u3002 \u901a\u8fc7 board_list \u6765\u67e5\u8be2\u6240\u6709\u7684\u677f\u5757\u3002 \u5176\u5b83\u65b9\u6cd5\u8bf7\u53c2\u770b API\u6587\u6863","title":"2.4. 
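Building on the `Board.init` call above, a minimal board query sketch; the return format follows the `board_list` API documented later on this site:

```python
from omicron.models.board import Board, BoardType

Board.init("192.168.100.101")  # IP of the host running the omega board service
boards = await Board.board_list(BoardType.INDUSTRY)
# each element is [code, name, member_count], e.g. ['881101', '种植业与林业', 24]
```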
\u677f\u5757\u6570\u636e"},{"location":"usage/#3-\u7b56\u7565\u7f16\u5199","text":"omicron \u901a\u8fc7 strategy \u6765\u63d0\u4f9b\u7b56\u7565\u6846\u67b6\u3002\u901a\u8fc7\u8be5\u6846\u67b6\u7f16\u5199\u7684\u7b56\u7565\uff0c\u53ef\u4ee5\u5728\u5b9e\u76d8\u548c\u56de\u6d4b\u4e4b\u95f4\u65e0\u7f1d\u8f6c\u6362 -- \u6839\u636e\u521d\u59cb\u5316\u65f6\u4f20\u5165\u7684\u670d\u52a1\u5668\u4e0d\u540c\u800c\u81ea\u52a8\u5207\u6362\u3002 omicron \u63d0\u4f9b\u4e86\u4e00\u4e2a\u7b80\u5355\u7684 \u53cc\u5747\u7ebf\u7b56\u7565 \u4f5c\u4e3a\u7b56\u7565\u7f16\u5199\u7684\u793a\u8303\uff0c\u53ef\u7ed3\u5408\u5176\u6e90\u7801\uff0c\u4ee5\u53ca\u672c\u6587\u6863\u4e2d\u7684 \u5b8c\u6574\u7b56\u7565\u793a\u4f8b \u5728notebook\u4e2d\u8fd0\u884c\u67e5\u770b\u3002 \u7b56\u7565\u6846\u67b6\u63d0\u4f9b\u4e86\u56de\u6d4b\u9a71\u52a8\u903b\u8f91\u53ca\u4e00\u4e9b\u57fa\u672c\u51fd\u6570\u3002\u8981\u7f16\u5199\u81ea\u5df1\u7684\u7b56\u7565\uff0c\u60a8\u9700\u8981\u4ece\u57fa\u7c7b BaseStrategy \u6d3e\u751f\u51fa\u81ea\u5df1\u7684\u5b50\u7c7b\uff0c\u5e76\u6539\u5199\u5b83\u7684 predict \u65b9\u6cd5\u6765\u5b9e\u73b0\u8c03\u4ed3\u6362\u80a1\u3002 \u7b56\u7565\u6846\u67b6\u4f9d\u8d56\u4e8e zillionare-trader-client \uff0c\u5728\u56de\u6d4b\u65f6\uff0c\u9700\u8981\u6709 zillionare-backtesting \u63d0\u4f9b\u56de\u6d4b\u670d\u52a1\u3002\u5728\u5b9e\u76d8\u65f6\uff0c\u9700\u8981 zilllionare-gm-adaptor \u6216\u8005\u5176\u5b83\u5b9e\u76d8\u4ea4\u6613\u7f51\u5173\u63d0\u4f9b\u670d\u52a1\u3002 \u7b56\u7565\u4ee3\u7801\u53ef\u4ee5\u4e0d\u52a0\u4fee\u6539\uff0c\u5373\u53ef\u4f7f\u7528\u4e8e\u56de\u6d4b\u548c\u5b9e\u76d8\u4e24\u79cd\u573a\u666f\u3002","title":"3. \u7b56\u7565\u7f16\u5199"},{"location":"usage/#31-\u56de\u6d4b\u573a\u666f","text":"\u5b9e\u73b0\u7b56\u7565\u56de\u6d4b\uff0c\u4e00\u822c\u9700\u8981\u8fdb\u884c\u4ee5\u4e0b\u6b65\u9aa4\uff1a 1. \u4ece\u6b64\u57fa\u7c7b\u6d3e\u751f\u51fa\u4e00\u4e2a\u7b56\u7565\u5b50\u7c7b\uff0c\u6bd4\u5982sma.py 2. \u5b50\u7c7b\u9700\u8981\u91cd\u8f7d predict \u65b9\u6cd5\uff0c\u6839\u636e\u5f53\u524d\u4f20\u5165\u7684\u65f6\u95f4\u5e27\u548c\u5e27\u7c7b\u578b\u53c2\u6570\uff0c\u83b7\u53d6\u6570\u636e\u5e76\u8fdb\u884c\u5904\u7406\uff0c\u8bc4\u4f30\u51fa\u4ea4\u6613\u4fe1\u53f7\u3002 3. \u5b50\u7c7b\u6839\u636e\u4ea4\u6613\u4fe1\u53f7\uff0c\u5728 predict \u65b9\u6cd5\u91cc\uff0c\u8c03\u7528\u57fa\u7c7b\u7684 buy \u548c sell \u65b9\u6cd5\u6765\u8fdb\u884c\u4ea4\u6613 4. \u751f\u6210\u7b56\u7565\u5b9e\u4f8b\uff0c\u901a\u8fc7\u5b9e\u4f8b\u8c03\u7528 backtest \u65b9\u6cd5\u6765\u8fdb\u884c\u56de\u6d4b\uff0c\u8be5\u65b9\u6cd5\u5c06\u6839\u636e\u7b56\u7565\u6784\u5efa\u65f6\u6307\u5b9a\u7684\u56de\u6d4b\u8d77\u59cb\u65f6\u95f4\u3001\u7ec8\u6b62\u65f6\u95f4\u3001\u5e27\u7c7b\u578b\uff0c\u9010\u5e27\u751f\u6210\u5404\u4e2a\u65f6\u95f4\u5e27\uff0c\u5e76\u8c03\u7528\u5b50\u7c7b\u7684 predict \u65b9\u6cd5\u3002\u5982\u679c\u8c03\u7528\u65f6\u6307\u5b9a\u4e86 portfolio \u548c min_bars \u53c2\u6570\uff0c backtest \u8fd8\u5c06\u8fdb\u884c\u6570\u636e\u9884\u53d6\uff0c\u5e76\u5c06\u622a\u6b62\u5230\u5f53\u524d\u56de\u6d4b\u5e27\u65f6\u7684\u6570\u636e\u4f20\u5165\u3002 4. 
\u5728\u4ea4\u6613\u7ed3\u675f\u65f6\uff0c\u8c03\u7528 plot_metrics \u65b9\u6cd5\u6765\u83b7\u53d6\u5982\u4e0b\u6240\u793a\u7684\u56de\u6d4b\u6307\u6807\u56fe \u5982\u4f55\u6d3e\u751f\u5b50\u7c7b\uff0c\u53ef\u4ee5\u53c2\u8003 sma \u6e90\u4ee3\u7801\u3002 1 2 3 4 5 6 7 8 9 10 from omicron.strategy.sma import SMAStrategy sma = SMAStrategy ( url = \"\" , # the url of either backtest server, or trade server is_backtest = True , start = datetime . date ( 2023 , 2 , 3 ), end = datetime . date ( 2023 , 4 , 28 ), frame_type = FrameType . DAY , ) await sma . backtest ( portfolio = [ \"600000.XSHG\" , min_bars = 20 ]) \u5728\u56de\u6d4b\u65f6\uff0c\u5fc5\u987b\u8981\u6307\u5b9a is_backtest=True \u548c start , end \u53c2\u6570\u3002","title":"3.1. \u56de\u6d4b\u573a\u666f"},{"location":"usage/#32-\u56de\u6d4b\u62a5\u544a","text":"\u5728\u56de\u6d4b\u7ed3\u675f\u540e\uff0c\u53ef\u4ee5\u901a\u8fc7\u4ee5\u4e0b\u65b9\u6cd5\uff0c\u5728notebook\u4e2d\u7ed8\u5236\u56de\u6d4b\u62a5\u544a\uff1a 1 await sma . plot_metrics () \u8fd9\u5c06\u7ed8\u5236\u51fa\u7c7b\u4f3c\u4ee5\u4e0b\u56fe\uff1a","title":"3.2. \u56de\u6d4b\u62a5\u544a"},{"location":"usage/#321-\u5728\u56de\u6d4b\u62a5\u544a\u4e2d\u6dfb\u52a0\u6280\u672f\u6307\u6807","text":"Info Since 2.0.0.a76 \u9996\u5148\uff0c\u6211\u4eec\u53ef\u4ee5\u5728\u7b56\u7565\u7c7b\u7684predict\u65b9\u6cd5\u4e2d\u8ba1\u7b97\u51fa\u6280\u672f\u6307\u6807\uff0c\u5e76\u4fdd\u5b58\u5230\u6210\u5458\u53d8\u91cf\u4e2d\u3002\u5728\u4e0b\u9762\u7684\u793a\u4f8b\u4ee3\u7801\u4e2d\uff0c\u6211\u4eec\u5c06\u6280\u672f\u6307\u6807\u53ca\u5f53\u65f6\u7684\u65f6\u95f4\u4fdd\u5b58\u5230\u4e86\u4e00\u4e2aindicators\u6570\u7ec4\u4e2d\uff08\u6ce8\u610f\u987a\u5e8f\uff01\uff09\uff0c\u7136\u540e\u5728\u56de\u6d4b\u7ed3\u675f\u540e\uff0c\u5728\u8c03\u7528 plot_metrics\u65f6\uff0c\u5c06\u5176\u4f20\u5165\u5373\u53ef\u3002 1 2 3 4 5 6 7 indicators = [ (datetime.date(2021, 2, 3), 20.1), (datetime.date(2021, 2, 4), 20.2), ..., (datetime.date(2021, 4, 1), 20.3) ] await sma.plot_metrics(indicator) \u65f6\u95f4\u53ea\u80fd\u4f7f\u7528\u4e3b\u5468\u671f\u7684\u65f6\u95f4\uff0c\u5426\u5219\u53ef\u80fd\u4ea7\u751f\u65e0\u6cd5\u4e0e\u5750\u6807\u8f74\u5bf9\u9f50\u7684\u60c5\u51b5\u3002 \u52a0\u5165\u7684\u6307\u6807\u9ed8\u8ba4\u53ea\u663e\u793a\u5728legend\u4e2d\uff0c\u5982\u679c\u8981\u663e\u793a\u5728\u4e3b\u56fe\u4e0a\uff0c\u9700\u8981\u70b9\u51fblegend\u8fdb\u884c\u663e\u793a\u3002 \u6307\u6807\u9664\u53ef\u4ee5\u53e0\u52a0\u5728\u4e3b\u56fe\u4e0a\u4e4b\u5916\uff0c\u8fd8\u4f1a\u51fa\u73b0\u5728\u57fa\u51c6\u7ebf\u7684hoverinfo\u4e2d\uff08\u5373\u4f7f\u6307\u6807\u7684\u8ba1\u7b97\u4e0e\u57fa\u51c6\u7ebf\u65e0\u5173\uff09\uff0c\u53c2\u89c1\u4e0a\u56fe\u4e2d\u7684\u201c\u6307\u6807\u201d\u884c\u3002","title":"3.2.1. 
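Note: the `backtest` call in the section 3.1 snippet above does not parse as written, because `min_bars=20` is placed inside the `portfolio` list; the complete example in section 3.4 shows the intended form. Likewise, the `plot_metrics` call in section 3.2.1 should pass the `indicators` variable built just above it. Corrected calls:

```python
# min_bars is a keyword argument of backtest, not an element of the portfolio list
await sma.backtest(portfolio=["600000.XSHG"], min_bars=20)

# pass the indicators list assembled above
await sma.plot_metrics(indicators)
```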
\u5728\u56de\u6d4b\u62a5\u544a\u4e2d\u6dfb\u52a0\u6280\u672f\u6307\u6807"},{"location":"usage/#33-\u4f7f\u7528\u6570\u636e\u9884\u53d6","text":"Info since version 2.0.0-alpha76 \u5728\u56de\u6d4b\u4e2d\uff0c\u53ef\u4ee5\u4f7f\u7528\u4e3b\u5468\u671f\u7684\u6570\u636e\u9884\u53d6\uff0c\u4ee5\u52a0\u5feb\u56de\u6d4b\u901f\u5ea6\u3002\u5de5\u4f5c\u539f\u7406\u5982\u4e0b\uff1a \u5982\u679c\u7b56\u7565\u5728\u8c03\u7528 backtest \u65f6\u4f20\u5165\u4e86 portfolio \u53ca min_bars \u53c2\u6570\uff0c\u5219 backtest \u5c06\u4f1a\u5728\u56de\u6d4b\u4e4b\u524d\uff0c\u9884\u53d6\u4ece[start - min_bars * frame_type, end]\u95f4\u7684portfolio\u884c\u60c5\u6570\u636e\uff0c\u5e76\u5728\u6bcf\u6b21\u8c03\u7528 predict \u65b9\u6cd5\u65f6\uff0c\u901a\u8fc7 barss \u53c2\u6570\uff0c\u5c06[start - min_bars * frame_type, start + i * frame_type]\u95f4\u7684\u6570\u636e\u4f20\u7ed9 predict \u65b9\u6cd5\u3002\u4f20\u5165\u7684\u6570\u636e\u5df2\u8fdb\u884c\u524d\u590d\u6743\u3002 \u5982\u679c\u5728\u56de\u6d4b\u8fc7\u7a0b\u4e2d\uff0c\u9700\u8981\u5077\u770b\u672a\u6765\u6570\u636e\uff0c\u53ef\u4ee5\u4f7f\u7528peek\u65b9\u6cd5\u3002","title":"3.3. \u4f7f\u7528\u6570\u636e\u9884\u53d6"},{"location":"usage/#34-\u5b8c\u6574sma\u56de\u6d4b\u793a\u4f8b","text":"\u4ee5\u4e0b\u7b56\u7565\u9700\u8981\u5728notebook\u4e2d\u8fd0\u884c\uff0c\u5e76\u4e14\u9700\u8981\u4e8b\u5148\u5b89\u88c5omega\u670d\u52a1\u5668\u540c\u6b65\u6570\u636e\uff0c\u5e76\u6b63\u786e\u914d\u7f6eomicron\u3002 \u8be5\u793a\u4f8b\u5728\u300a\u5927\u5bcc\u7fc1\u91cf\u5316\u8bfe\u7a0b\u300b\u8bfe\u4ef6\u73af\u5883\u4e0b\u53ef\u8fd0\u884c\u3002 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 import cfg4py import omicron import datetime from omicron.strategy.sma import SMAStrategy from coretypes import FrameType cfg = cfg4py . init ( \"/etc/zillionare\" ) await omicron . init () sec = \"600000.XSHG\" start = datetime . date ( 2022 , 1 , 4 ) end = datetime . date ( 2023 , 1 , 1 ) sma = SMAStrategy ( sec , url = cfg . backtest . url , is_backtest = True , start = start , end = end , frame_type = FrameType . DAY ) await sma . backtest ( portfolio = [ sec ], min_bars = 10 , stop_on_error = False ) await sma . plot_metrics ( sma . indicators )","title":"3.4. \u5b8c\u6574SMA\u56de\u6d4b\u793a\u4f8b"},{"location":"usage/#35-\u5b9e\u76d8","text":"\u5728\u5b9e\u76d8\u73af\u5883\u4e0b\uff0c\u4f60\u8fd8\u9700\u8981\u5728\u5b50\u7c7b\u4e2d\u52a0\u5165\u5468\u671f\u6027\u4efb\u52a1(\u6bd4\u5982\u6bcf\u5206\u949f\u6267\u884c\u4e00\u6b21\uff09\uff0c\u5728\u8be5\u4efb\u52a1\u4e2d\u8c03\u7528 predict \u65b9\u6cd5\u6765\u5b8c\u6210\u4ea4\u6613\uff0c\u5982\u4ee5\u4e0b\u793a\u4f8b\u6240\u793a\uff1a 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 import cfg4py import omicron import datetime from omicron.strategy.sma import SMAStrategy from coretypes import FrameType from apscheduler.schedulers.asyncio import AsyncIOScheduler cfg = cfg4py . init ( \"/etc/zillionare\" ) await omicron . init () async def daily_job (): sma = SMAStrategy ( sec , url = cfg . traderserver . url , is_backtest = False , frame_type = FrameType . DAY ) bars = await Stock . get_bars ( sma . _sec , 20 , FrameType . DAY ) await sma . predict ( barss = { sma . _sec : bars }) async def main (): scheduler = AsyncIOScheduler () scheduler . add_job ( daily_job , 'cron' , hour = 14 , minute = 55 ) scheduler . 
start () \u7b56\u7565\u4ee3\u7801\u65e0\u987b\u4fee\u6539\u3002 \u8be5\u7b56\u7565\u5c06\u81ea\u52a8\u5728\u6bcf\u5929\u768414\uff1a55\u8fd0\u884c\uff0c\u4ee5\u5224\u65ad\u662f\u5426\u8981\u8fdb\u884c\u8c03\u4ed3\u6362\u80a1\u3002\u60a8\u9700\u8981\u989d\u5916\u5224\u65ad\u5f53\u5929\u662f\u5426\u4e3a\u4ea4\u6613\u65e5\u3002","title":"3.5. \u5b9e\u76d8"},{"location":"usage/#4-\u7ed8\u56fe","text":"omicron \u901a\u8fc7 Candlestick \u63d0\u4f9b\u4e86 k \u7ebf\u7ed8\u5236\u529f\u80fd\u3002\u9ed8\u8ba4\u5730\uff0c\u5b83\u5c06\u7ed8\u5236\u4e00\u5e45\u663e\u793a 120 \u4e2a bar\uff0c\u53ef\u62d6\u52a8\uff08\u4ee5\u52a0\u8f7d\u66f4\u591a bar)\uff0c\u5e76\u4e14\u53ef\u4ee5\u53e0\u52a0\u526f\u56fe\u3001\u4e3b\u56fe\u53e0\u52a0\u5404\u79cd\u6307\u6807\u7684 k \u7ebf\u56fe\uff1a \u4e0a\u56fe\u663e\u793a\u4e86\u81ea\u52a8\u68c0\u6d4b\u51fa\u6765\u7684\u5e73\u53f0\u3002\u6b64\u5916\uff0c\u8fd8\u53ef\u4ee5\u8fdb\u884c\u9876\u5e95\u81ea\u52a8\u68c0\u6d4b\u548c\u6807\u6ce8\u3002 Note \u901a\u8fc7\u6307\u5b9a width \u53c2\u6570\uff0c\u53ef\u4ee5\u5f71\u54cd\u521d\u59cb\u52a0\u8f7d\u7684bar\u7684\u6570\u91cf\u3002 omicron \u901a\u8fc7 metris \u63d0\u4f9b\u56de\u6d4b\u62a5\u544a\u3002\u8be5\u62a5\u544a\u7c7b\u4f3c\u4e8e\uff1a \u5b83\u540c\u6837\u63d0\u4f9b\u53ef\u62d6\u52a8\u7684\u7ed8\u56fe\uff0c\u5e76\u4e14\u5728\u4e70\u5356\u70b9\u4e0a\u53ef\u4ee5\u901a\u8fc7\u9f20\u6807\u60ac\u505c\uff0c\u663e\u793a\u4e70\u5356\u70b9\u4fe1\u606f\u3002 omicron \u7684\u7ed8\u56fe\u529f\u80fd\u53ea\u80fd\u5728 notebook \u4e2d\u4f7f\u7528\u3002","title":"4. \u7ed8\u56fe"},{"location":"usage/#5-\u8bc4\u4f30\u6307\u6807","text":"omicron \u63d0\u4f9b\u4e86 mean_absolute_error \u51fd\u6570\u548c pct_error \u51fd\u6570\u3002\u5b83\u4eec\u5728 scipy \u6216\u8005\u5176\u5b83\u5e93\u4e2d\u4e5f\u80fd\u627e\u5230\uff0c\u4e3a\u4e86\u65b9\u4fbf\u4e0d\u719f\u6089\u8fd9\u4e9b\u7b2c\u4e09\u65b9\u5e93\u7684\u4f7f\u7528\u8005\uff0c\u6211\u4eec\u5185\u7f6e\u4e86\u8fd9\u4e2a\u5e38\u6307\u6807\u3002 \u5bf9\u4e00\u4e9b\u5e38\u89c1\u7684\u7b56\u7565\u8bc4\u4f30\u51fd\u6570\uff0c\u6211\u4eec\u5f15\u7528\u4e86 empyrical \u4e2d\u7684\u76f8\u5173\u51fd\u6570\uff0c\u6bd4\u5982 alpha, beta, shapre_ratio\uff0c calmar_ratio \u7b49\u3002","title":"5. \u8bc4\u4f30\u6307\u6807"},{"location":"usage/#6-talib-\u5e93","text":"\u60a8\u5e94\u8be5\u628a\u8fd9\u91cc\u63d0\u4f9b\u7684\u51fd\u6570\u5f53\u6210\u5b9e\u9a8c\u6027\u7684\u3002\u8fd9\u4e9b API \u4e5f\u53ef\u80fd\u5728\u67d0\u5929\u88ab\u5e9f\u5f03\u3001\u91cd\u547d\u540d\u3001\u4fee\u6539\uff0c\u6216\u8005\u8fd9\u4e9b API \u5e76\u6ca1\u6709\u591a\u5927\u4f5c\u7528\uff0c\u6216\u8005\u5b83\u4eec\u7684\u5b9e\u73b0\u5b58\u5728\u9519\u8bef\u3002 \u4f46\u662f\uff0c\u5982\u679c\u6211\u4eec\u5c06\u6765\u4f1a\u629b\u5f03\u8fd9\u4e9b API \u7684\u8bdd\uff0c\u6211\u4eec\u4e00\u5b9a\u4f1a\u901a\u8fc7 depracted \u65b9\u6cd5\u63d0\u524d\u8fdb\u884c\u8b66\u544a\u3002","title":"6. 
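Section 3.5 above notes that you must additionally check whether the current day is a trading day before the scheduled job runs. A minimal sketch using `day_shift` as documented in section 2.2.1, where `day_shift(dt, 0)` returns `dt` itself only on trading days:

```python
import datetime
from omicron import tf

async def guarded_daily_job():
    today = datetime.date.today()
    if tf.day_shift(today, 0) != today:
        return  # weekend or holiday: skip the job
    # ... otherwise proceed exactly as in the section 3.5 daily_job example
```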
TALIB \u5e93"},{"location":"usage/#7-\u6269\u5c55","text":"Python\u5f53\u4e2d\u7684\u56db\u820d\u4e94\u5165\u7528\u4e8e\u8bc1\u5238\u6295\u8d44\uff0c\u4f1a\u5e26\u6765\u4e25\u91cd\u7684\u95ee\u9898\uff0c\u6bd4\u5982\uff0c\u50cf round(0.3/2) \uff0c\u6211\u4eec\u671f\u671b\u5f97\u5230 0.2 \uff0c\u4f46\u5b9e\u9645\u4e0a\u4f1a\u5f97\u5230 0.1 \u3002\u8fd9\u79cd\u8bef\u5dee\u4e00\u65e6\u53d1\u751f\u6210\u5728\u4e00\u4e9b\u4f4e\u4ef7\u80a1\u8eab\u4e0a\uff0c\u5c06\u4f1a\u5e26\u6765\u975e\u5e38\u5927\u7684\u4e0d\u786e\u5b9a\u6027\u3002\u6bd4\u5982\uff0c1.945\u4fdd\u7559\u4e24\u4f4d\u5c0f\u6570\uff0c\u672c\u6765\u5e94\u8be5\u662f1.95\uff0c\u5982\u679c\u88ab\u8bef\u820d\u5165\u4e3a1.94\uff0c\u5219\u8bef\u5dee\u63a5\u8fd10.5%\uff0c\u8fd9\u5bf9\u6295\u8d44\u6765\u8bf4\u662f\u96be\u4ee5\u63a5\u53d7\u7684\u3002 Info \u5982\u679c\u4e00\u5929\u53ea\u8fdb\u884c\u4e00\u6b21\u4ea4\u6613\uff0c\u4e00\u6b21\u4ea4\u6613\u8bef\u5dee\u4e3a0.5%\uff0c\u4e00\u5e74\u7d2f\u79ef\u4e0b\u6765\uff0c\u8bef\u5dee\u5c06\u8fbe\u52302.5\u500d\u3002 \u6211\u4eec\u5728 decimals \u4e2d\u63d0\u4f9b\u4e86\u9002\u7528\u4e8e\u8bc1\u5238\u4ea4\u6613\u9886\u57df\u7684\u7248\u672c\uff0c math_round \u548c\u4ef7\u683c\u6bd4\u8f83\u51fd\u6570 price_equal \u3002 \u6211\u4eec\u8fd8\u5728 np \u4e2d\uff0c\u5bf9numpy\u4e2d\u7f3a\u5931\u7684\u4e00\u4e9b\u529f\u80fd\u8fdb\u884c\u4e86\u8865\u5145\uff0c\u6bd4\u5982 numpy_append_fields , fill_nan \u7b49\u3002","title":"7. \u6269\u5c55"},{"location":"api/board/","text":"Board \u00b6 Source code in omicron/models/board.py class Board : server_ip : str server_port : int measurement = \"board_bars_1d\" @classmethod def init ( cls , ip : str , port : int = 3180 ): cls . server_ip = ip cls . server_port = port @classmethod async def _rpc_call ( cls , url : str , param : str ): _url = f \"http:// { cls . server_ip } : { cls . server_port } /api/board/ { url } \" async with httpx . AsyncClient () as client : r = await client . post ( _url , json = param , timeout = 10 ) if r . status_code != 200 : logger . error ( f \"failed to post RPC call, { _url } : { param } , response: { r . content . decode () } \" ) return { \"rc\" : r . status_code } rsp = json . loads ( r . content ) return { \"rc\" : 200 , \"data\" : rsp } @classmethod async def board_list ( cls , _btype : BoardType = BoardType . CONCEPT ) -> List [ List ]: \"\"\"\u83b7\u53d6\u677f\u5757\u5217\u8868 Args: _btype: \u677f\u5757\u7c7b\u522b\uff0c\u53ef\u9009\u503c`BoardType.CONCEPT`\u548c`BoardType.INDUSTRY`. Returns: \u677f\u5757\u5217\u8868\u3002\u6bcf\u4e00\u4e2a\u5b50\u5143\u7d20\u4ecd\u4e3a\u4e00\u4e2a\u5217\u8868\uff0c\u7531\u677f\u5757\u4ee3\u7801(str), \u677f\u5757\u540d\u79f0(str)\u548c\u6210\u5458\u6570\u7ec4\u6210\u3002\u793a\u4f8b\uff1a ``` [ ['881101', '\u79cd\u690d\u4e1a\u4e0e\u6797\u4e1a', 24], ['881102', '\u517b\u6b96\u4e1a', 27], ['881103', '\u519c\u4ea7\u54c1\u52a0\u5de5', 41], ['881104', '\u519c\u4e1a\u670d\u52a1', 16], ] ``` \"\"\" rsp = await cls . _rpc_call ( \"board_list\" , { \"board_type\" : _btype . value }) if rsp [ \"rc\" ] != 200 : return { \"status\" : 500 , \"msg\" : \"httpx RPC call failed\" } return rsp [ \"data\" ] @classmethod async def fuzzy_match_board_name ( cls , pattern : str , _btype : BoardType = BoardType . 
CONCEPT ) -> dict : \"\"\"\u6a21\u7cca\u67e5\u8be2\u677f\u5757\u4ee3\u7801\u7684\u540d\u5b57 Examples: ```python await Board.fuzzy_match_board_name(\"\u6c7d\u8f66\", BoardType.INDUSTRY) # returns: [ '881125 \u6c7d\u8f66\u6574\u8f66', '881126 \u6c7d\u8f66\u96f6\u90e8\u4ef6', '881127 \u975e\u6c7d\u8f66\u4ea4\u8fd0', '881128 \u6c7d\u8f66\u670d\u52a1', '884107 \u6c7d\u8f66\u670d\u52a1\u2162', '884194 \u6c7d\u8f66\u96f6\u90e8\u4ef6\u2162' ] ``` Args: pattern: \u5f85\u67e5\u8be2\u6a21\u5f0f\u4e32 _btype: \u67e5\u8be2\u7c7b\u578b Returns: \u5305\u542b\u4ee5\u4e0bkey\u7684dict: code(\u677f\u5757\u4ee3\u7801), name\uff08\u677f\u5757\u540d\uff09, stocks(\u80a1\u7968\u6570) \"\"\" if not pattern : return [] rsp = await cls . _rpc_call ( \"fuzzy_match_name\" , { \"board_type\" : _btype . value , \"pattern\" : pattern } ) if rsp [ \"rc\" ] != 200 : return { \"status\" : 500 , \"msg\" : \"httpx RPC call failed\" } return rsp [ \"data\" ] @classmethod async def board_info_by_id ( cls , board_id : str , full_mode : bool = False ) -> dict : \"\"\"\u901a\u8fc7\u677f\u5757\u4ee3\u7801\u67e5\u8be2\u677f\u5757\u4fe1\u606f\uff08\u540d\u5b57\uff0c\u6210\u5458\u6570\u76ee\u6216\u6e05\u5355\uff09 Examples: ```python board_code = '881128' # \u6c7d\u8f66\u670d\u52a1 \u53ef\u81ea\u884c\u4fee\u6539 board_info = await Board.board_info_by_id(board_code) print(board_info) # \u5b57\u5178\u5f62\u5f0f # returns {'code': '881128', 'name': '\u6c7d\u8f66\u670d\u52a1', 'stocks': 14} ``` Returns: {'code': '301505', 'name': '\u533b\u7597\u5668\u68b0\u6982\u5ff5', 'stocks': 242} or {'code': '301505', 'name': '\u533b\u7597\u5668\u68b0\u6982\u5ff5', 'stocks': [['300916', '\u6717\u7279\u667a\u80fd'], ['300760', '\u8fc8\u745e\u533b\u7597']]} \"\"\" if not board_id : return {} if board_id [ 0 ] == \"3\" : _btype = BoardType . CONCEPT else : _btype = BoardType . INDUSTRY _mode = 0 if full_mode : # \u8f6c\u6362bool\u7c7b\u578b _mode = 1 rsp = await cls . _rpc_call ( \"info\" , { \"board_type\" : _btype . value , \"board_id\" : board_id , \"fullmode\" : _mode }, ) if rsp [ \"rc\" ] != 200 : return { \"status\" : 500 , \"msg\" : \"httpx RPC call failed\" } return rsp [ \"data\" ] @classmethod async def board_info_by_security ( cls , security : str , _btype : BoardType = BoardType . CONCEPT ) -> List [ dict ]: \"\"\"\u83b7\u53d6\u80a1\u7968\u6240\u5728\u677f\u5757\u4fe1\u606f\uff1a\u540d\u79f0\uff0c\u4ee3\u7801 Examples: ```python stock_code = '002236' # \u5927\u534e\u80a1\u4efd\uff0c\u80a1\u7968\u4ee3\u7801\u4e0d\u5e26\u5b57\u6bcd\u540e\u7f00 stock_in_board = await Board.board_info_by_security(stock_code, _btype=BoardType.CONCEPT) print(stock_in_board) # returns: [ {'code': '301715', 'name': '\u8bc1\u91d1\u6301\u80a1', 'stocks': 208}, {'code': '308870', 'name': '\u6570\u5b57\u7ecf\u6d4e', 'stocks': 195}, {'code': '308642', 'name': '\u6570\u636e\u4e2d\u5fc3', 'stocks': 188}, ..., {'code': '300008', 'name': '\u65b0\u80fd\u6e90\u6c7d\u8f66', 'stocks': 603} ] ``` Returns: [{'code': '301505', 'name': '\u533b\u7597\u5668\u68b0\u6982\u5ff5'}] \"\"\" if not security : return [] rsp = await cls . _rpc_call ( \"info_by_sec\" , { \"board_type\" : _btype . value , \"security\" : security } ) if rsp [ \"rc\" ] != 200 : return { \"status\" : 500 , \"msg\" : \"httpx RPC call failed\" } return rsp [ \"data\" ] @classmethod async def board_filter_members ( cls , included : List [ str ], excluded : List [ str ] = [], _btype : BoardType = BoardType . 
CONCEPT , ) -> List : \"\"\"\u6839\u636e\u677f\u5757\u540d\u7b5b\u9009\u80a1\u7968\uff0c\u53c2\u6570\u4e3ainclude, exclude Fixme: this function doesn't work Raise status 500 Returns: [['300181', '\u4f50\u529b\u836f\u4e1a'], ['600056', '\u4e2d\u56fd\u533b\u836f']] \"\"\" if not included : return [] if excluded is None : excluded = [] rsp = await cls . _rpc_call ( \"board_filter_members\" , { \"board_type\" : _btype . value , \"include_boards\" : included , \"exclude_boards\" : excluded , }, ) if rsp [ \"rc\" ] != 200 : return { \"status\" : 500 , \"msg\" : \"httpx RPC call failed\" } return rsp [ \"data\" ] @classmethod async def new_concept_boards ( cls , days : int = 10 ): raise NotImplementedError ( \"not ready\" ) @classmethod async def latest_concept_boards ( n : int = 3 ): raise NotImplementedError ( \"not ready\" ) @classmethod async def new_concept_members ( days : int = 10 , prot : int = None ): raise NotImplementedError ( \"not ready\" ) @classmethod async def board_filter ( cls , industry = None , with_concepts : Optional [ List [ str ]] = None , without = [] ): raise NotImplementedError ( \"not ready\" ) @classmethod async def save_bars ( cls , bars ): client = get_influx_client () logger . info ( \"persisting bars to influxdb: %s , %d secs\" , cls . measurement , len ( bars ) ) await client . save ( bars , cls . measurement , tag_keys = [ \"code\" ], time_key = \"frame\" ) return True @classmethod async def get_last_date_of_bars ( cls , code : str ): # \u884c\u4e1a\u677f\u5757\u56de\u6eaf1\u5e74\u7684\u6570\u636e\uff0c\u6982\u5ff5\u677f\u5757\u53ea\u53d6\u5f53\u5e74\u7684\u6570\u636e code = f \" { code } .THS\" client = get_influx_client () now = datetime . datetime . now () dt_end = tf . day_shift ( now , 0 ) # 250 + 60: \u53ef\u4ee5\u5f97\u523060\u4e2aMA250\u7684\u70b9, \u9ed8\u8ba4K\u7ebf\u56fe120\u4e2a\u8282\u70b9 dt_start = tf . day_shift ( now , - 310 ) flux = ( Flux () . measurement ( cls . measurement ) . range ( dt_start , dt_end ) . bucket ( client . _bucket ) . tags ({ \"code\" : code }) ) data = await client . query ( flux ) if len ( data ) == 2 : # \\r\\n return dt_start ds = DataframeDeserializer ( sort_values = \"_time\" , usecols = [ \"_time\" ], time_col = \"_time\" , engine = \"c\" ) bars = ds ( data ) secs = bars . to_records ( index = False ) . astype ( \"datetime64[s]\" ) _dt = secs [ - 1 ] . item () return _dt . date () @classmethod async def get_bars_in_range ( cls , code : str , start : Frame , end : Frame = None ) -> BarsArray : \"\"\"\u4ece\u6301\u4e45\u5316\u6570\u636e\u5e93\u4e2d\u83b7\u53d6\u4ecb\u4e8e[`start`, `end`]\u95f4\u7684\u884c\u60c5\u8bb0\u5f55 Examples: ```python start = datetime.date(2022, 9, 1) # \u8d77\u59cb\u65f6\u95f4\uff0c \u53ef\u4fee\u6539 end = datetime.date(2023, 3, 1) # \u622a\u6b62\u65f6\u95f4\uff0c \u53ef\u4fee\u6539 board_code = '881128' # \u6c7d\u8f66\u670d\u52a1\uff0c \u53ef\u4fee\u6539 bars = await Board.get_bars_in_range(board_code, start, end) bars[-3:] # \u6253\u5370\u540e3\u6761\u6570\u636e # prints: rec.array([ ('2023-02-27T00:00:00', 1117.748, 1124.364, 1108.741, 1109.525, 1.77208600e+08, 1.13933095e+09, 1.), ('2023-02-28T00:00:00', 1112.246, 1119.568, 1109.827, 1113.43 , 1.32828124e+08, 6.65160380e+08, 1.), ('2023-03-01T00:00:00', 1122.233, 1123.493, 1116.62 , 1123.274, 7.21718910e+07, 3.71172850e+08, 1.) 
], dtype=[('frame', ' ) async classmethod \u00b6 \u6839\u636e\u677f\u5757\u540d\u7b5b\u9009\u80a1\u7968\uff0c\u53c2\u6570\u4e3ainclude, exclude Fixme this function doesn't work Raise status 500 Returns: Type Description List [['300181', '\u4f50\u529b\u836f\u4e1a'], ['600056', '\u4e2d\u56fd\u533b\u836f']] Source code in omicron/models/board.py @classmethod async def board_filter_members ( cls , included : List [ str ], excluded : List [ str ] = [], _btype : BoardType = BoardType . CONCEPT , ) -> List : \"\"\"\u6839\u636e\u677f\u5757\u540d\u7b5b\u9009\u80a1\u7968\uff0c\u53c2\u6570\u4e3ainclude, exclude Fixme: this function doesn't work Raise status 500 Returns: [['300181', '\u4f50\u529b\u836f\u4e1a'], ['600056', '\u4e2d\u56fd\u533b\u836f']] \"\"\" if not included : return [] if excluded is None : excluded = [] rsp = await cls . _rpc_call ( \"board_filter_members\" , { \"board_type\" : _btype . value , \"include_boards\" : included , \"exclude_boards\" : excluded , }, ) if rsp [ \"rc\" ] != 200 : return { \"status\" : 500 , \"msg\" : \"httpx RPC call failed\" } return rsp [ \"data\" ] board_info_by_id ( board_id , full_mode = False ) async classmethod \u00b6 \u901a\u8fc7\u677f\u5757\u4ee3\u7801\u67e5\u8be2\u677f\u5757\u4fe1\u606f\uff08\u540d\u5b57\uff0c\u6210\u5458\u6570\u76ee\u6216\u6e05\u5355\uff09 Examples: 1 2 3 4 5 6 board_code = '881128' # \u6c7d\u8f66\u670d\u52a1 \u53ef\u81ea\u884c\u4fee\u6539 board_info = await Board . board_info_by_id ( board_code ) print ( board_info ) # \u5b57\u5178\u5f62\u5f0f # returns { 'code' : '881128' , 'name' : '\u6c7d\u8f66\u670d\u52a1' , 'stocks' : 14 } Returns: Type Description {'code' '301505', 'name': '\u533b\u7597\u5668\u68b0\u6982\u5ff5', 'stocks': 242} or Source code in omicron/models/board.py @classmethod async def board_info_by_id ( cls , board_id : str , full_mode : bool = False ) -> dict : \"\"\"\u901a\u8fc7\u677f\u5757\u4ee3\u7801\u67e5\u8be2\u677f\u5757\u4fe1\u606f\uff08\u540d\u5b57\uff0c\u6210\u5458\u6570\u76ee\u6216\u6e05\u5355\uff09 Examples: ```python board_code = '881128' # \u6c7d\u8f66\u670d\u52a1 \u53ef\u81ea\u884c\u4fee\u6539 board_info = await Board.board_info_by_id(board_code) print(board_info) # \u5b57\u5178\u5f62\u5f0f # returns {'code': '881128', 'name': '\u6c7d\u8f66\u670d\u52a1', 'stocks': 14} ``` Returns: {'code': '301505', 'name': '\u533b\u7597\u5668\u68b0\u6982\u5ff5', 'stocks': 242} or {'code': '301505', 'name': '\u533b\u7597\u5668\u68b0\u6982\u5ff5', 'stocks': [['300916', '\u6717\u7279\u667a\u80fd'], ['300760', '\u8fc8\u745e\u533b\u7597']]} \"\"\" if not board_id : return {} if board_id [ 0 ] == \"3\" : _btype = BoardType . CONCEPT else : _btype = BoardType . INDUSTRY _mode = 0 if full_mode : # \u8f6c\u6362bool\u7c7b\u578b _mode = 1 rsp = await cls . _rpc_call ( \"info\" , { \"board_type\" : _btype . value , \"board_id\" : board_id , \"fullmode\" : _mode }, ) if rsp [ \"rc\" ] != 200 : return { \"status\" : 500 , \"msg\" : \"httpx RPC call failed\" } return rsp [ \"data\" ] board_info_by_security ( security , _btype =< BoardType . CONCEPT : 'concept' > ) async classmethod \u00b6 \u83b7\u53d6\u80a1\u7968\u6240\u5728\u677f\u5757\u4fe1\u606f\uff1a\u540d\u79f0\uff0c\u4ee3\u7801 Examples: 1 2 3 4 5 6 7 8 9 10 11 12 stock_code = '002236' # \u5927\u534e\u80a1\u4efd\uff0c\u80a1\u7968\u4ee3\u7801\u4e0d\u5e26\u5b57\u6bcd\u540e\u7f00 stock_in_board = await Board . board_info_by_security ( stock_code , _btype = BoardType . 
CONCEPT ) print ( stock_in_board ) # returns: [ { 'code' : '301715' , 'name' : '\u8bc1\u91d1\u6301\u80a1' , 'stocks' : 208 }, { 'code' : '308870' , 'name' : '\u6570\u5b57\u7ecf\u6d4e' , 'stocks' : 195 }, { 'code' : '308642' , 'name' : '\u6570\u636e\u4e2d\u5fc3' , 'stocks' : 188 }, ... , { 'code' : '300008' , 'name' : '\u65b0\u80fd\u6e90\u6c7d\u8f66' , 'stocks' : 603 } ] Returns: Type Description [{'code' '301505', 'name': '\u533b\u7597\u5668\u68b0\u6982\u5ff5'}] Source code in omicron/models/board.py @classmethod async def board_info_by_security ( cls , security : str , _btype : BoardType = BoardType . CONCEPT ) -> List [ dict ]: \"\"\"\u83b7\u53d6\u80a1\u7968\u6240\u5728\u677f\u5757\u4fe1\u606f\uff1a\u540d\u79f0\uff0c\u4ee3\u7801 Examples: ```python stock_code = '002236' # \u5927\u534e\u80a1\u4efd\uff0c\u80a1\u7968\u4ee3\u7801\u4e0d\u5e26\u5b57\u6bcd\u540e\u7f00 stock_in_board = await Board.board_info_by_security(stock_code, _btype=BoardType.CONCEPT) print(stock_in_board) # returns: [ {'code': '301715', 'name': '\u8bc1\u91d1\u6301\u80a1', 'stocks': 208}, {'code': '308870', 'name': '\u6570\u5b57\u7ecf\u6d4e', 'stocks': 195}, {'code': '308642', 'name': '\u6570\u636e\u4e2d\u5fc3', 'stocks': 188}, ..., {'code': '300008', 'name': '\u65b0\u80fd\u6e90\u6c7d\u8f66', 'stocks': 603} ] ``` Returns: [{'code': '301505', 'name': '\u533b\u7597\u5668\u68b0\u6982\u5ff5'}] \"\"\" if not security : return [] rsp = await cls . _rpc_call ( \"info_by_sec\" , { \"board_type\" : _btype . value , \"security\" : security } ) if rsp [ \"rc\" ] != 200 : return { \"status\" : 500 , \"msg\" : \"httpx RPC call failed\" } return rsp [ \"data\" ] board_list ( _btype =< BoardType . CONCEPT : 'concept' > ) async classmethod \u00b6 \u83b7\u53d6\u677f\u5757\u5217\u8868 Parameters: Name Type Description Default _btype BoardType \u677f\u5757\u7c7b\u522b\uff0c\u53ef\u9009\u503c BoardType.CONCEPT \u548c BoardType.INDUSTRY . Returns: Type Description List[List] \u677f\u5757\u5217\u8868\u3002\u6bcf\u4e00\u4e2a\u5b50\u5143\u7d20\u4ecd\u4e3a\u4e00\u4e2a\u5217\u8868\uff0c\u7531\u677f\u5757\u4ee3\u7801(str), \u677f\u5757\u540d\u79f0(str)\u548c\u6210\u5458\u6570\u7ec4\u6210\u3002\u793a\u4f8b\uff1a 1 2 3 4 5 6 [ ['881101', '\u79cd\u690d\u4e1a\u4e0e\u6797\u4e1a', 24], ['881102', '\u517b\u6b96\u4e1a', 27], ['881103', '\u519c\u4ea7\u54c1\u52a0\u5de5', 41], ['881104', '\u519c\u4e1a\u670d\u52a1', 16], ] Source code in omicron/models/board.py @classmethod async def board_list ( cls , _btype : BoardType = BoardType . CONCEPT ) -> List [ List ]: \"\"\"\u83b7\u53d6\u677f\u5757\u5217\u8868 Args: _btype: \u677f\u5757\u7c7b\u522b\uff0c\u53ef\u9009\u503c`BoardType.CONCEPT`\u548c`BoardType.INDUSTRY`. Returns: \u677f\u5757\u5217\u8868\u3002\u6bcf\u4e00\u4e2a\u5b50\u5143\u7d20\u4ecd\u4e3a\u4e00\u4e2a\u5217\u8868\uff0c\u7531\u677f\u5757\u4ee3\u7801(str), \u677f\u5757\u540d\u79f0(str)\u548c\u6210\u5458\u6570\u7ec4\u6210\u3002\u793a\u4f8b\uff1a ``` [ ['881101', '\u79cd\u690d\u4e1a\u4e0e\u6797\u4e1a', 24], ['881102', '\u517b\u6b96\u4e1a', 27], ['881103', '\u519c\u4ea7\u54c1\u52a0\u5de5', 41], ['881104', '\u519c\u4e1a\u670d\u52a1', 16], ] ``` \"\"\" rsp = await cls . _rpc_call ( \"board_list\" , { \"board_type\" : _btype . value }) if rsp [ \"rc\" ] != 200 : return { \"status\" : 500 , \"msg\" : \"httpx RPC call failed\" } return rsp [ \"data\" ] fuzzy_match_board_name ( pattern , _btype =< BoardType . 
CONCEPT : 'concept' > ) async classmethod \u00b6 \u6a21\u7cca\u67e5\u8be2\u677f\u5757\u4ee3\u7801\u7684\u540d\u5b57 Examples: 1 2 3 4 5 6 7 8 9 10 11 await Board . fuzzy_match_board_name ( \"\u6c7d\u8f66\" , BoardType . INDUSTRY ) # returns: [ '881125 \u6c7d\u8f66\u6574\u8f66' , '881126 \u6c7d\u8f66\u96f6\u90e8\u4ef6' , '881127 \u975e\u6c7d\u8f66\u4ea4\u8fd0' , '881128 \u6c7d\u8f66\u670d\u52a1' , '884107 \u6c7d\u8f66\u670d\u52a1\u2162' , '884194 \u6c7d\u8f66\u96f6\u90e8\u4ef6\u2162' ] Parameters: Name Type Description Default pattern str \u5f85\u67e5\u8be2\u6a21\u5f0f\u4e32 required _btype BoardType \u67e5\u8be2\u7c7b\u578b Returns: Type Description \u5305\u542b\u4ee5\u4e0bkey\u7684dict code(\u677f\u5757\u4ee3\u7801), name\uff08\u677f\u5757\u540d\uff09, stocks(\u80a1\u7968\u6570) Source code in omicron/models/board.py @classmethod async def fuzzy_match_board_name ( cls , pattern : str , _btype : BoardType = BoardType . CONCEPT ) -> dict : \"\"\"\u6a21\u7cca\u67e5\u8be2\u677f\u5757\u4ee3\u7801\u7684\u540d\u5b57 Examples: ```python await Board.fuzzy_match_board_name(\"\u6c7d\u8f66\", BoardType.INDUSTRY) # returns: [ '881125 \u6c7d\u8f66\u6574\u8f66', '881126 \u6c7d\u8f66\u96f6\u90e8\u4ef6', '881127 \u975e\u6c7d\u8f66\u4ea4\u8fd0', '881128 \u6c7d\u8f66\u670d\u52a1', '884107 \u6c7d\u8f66\u670d\u52a1\u2162', '884194 \u6c7d\u8f66\u96f6\u90e8\u4ef6\u2162' ] ``` Args: pattern: \u5f85\u67e5\u8be2\u6a21\u5f0f\u4e32 _btype: \u67e5\u8be2\u7c7b\u578b Returns: \u5305\u542b\u4ee5\u4e0bkey\u7684dict: code(\u677f\u5757\u4ee3\u7801), name\uff08\u677f\u5757\u540d\uff09, stocks(\u80a1\u7968\u6570) \"\"\" if not pattern : return [] rsp = await cls . _rpc_call ( \"fuzzy_match_name\" , { \"board_type\" : _btype . value , \"pattern\" : pattern } ) if rsp [ \"rc\" ] != 200 : return { \"status\" : 500 , \"msg\" : \"httpx RPC call failed\" } return rsp [ \"data\" ] get_bars_in_range ( code , start , end = None ) async classmethod \u00b6 \u4ece\u6301\u4e45\u5316\u6570\u636e\u5e93\u4e2d\u83b7\u53d6\u4ecb\u4e8e[ start , end ]\u95f4\u7684\u884c\u60c5\u8bb0\u5f55 Examples: 1 2 3 4 5 6 7 8 9 10 11 12 13 start = datetime . date ( 2022 , 9 , 1 ) # \u8d77\u59cb\u65f6\u95f4\uff0c \u53ef\u4fee\u6539 end = datetime . date ( 2023 , 3 , 1 ) # \u622a\u6b62\u65f6\u95f4\uff0c \u53ef\u4fee\u6539 board_code = '881128' # \u6c7d\u8f66\u670d\u52a1\uff0c \u53ef\u4fee\u6539 bars = await Board . get_bars_in_range ( board_code , start , end ) bars [ - 3 :] # \u6253\u5370\u540e3\u6761\u6570\u636e # prints: rec . array ([ ( '2023-02-27T00:00:00' , 1117.748 , 1124.364 , 1108.741 , 1109.525 , 1.77208600e+08 , 1.13933095e+09 , 1. ), ( '2023-02-28T00:00:00' , 1112.246 , 1119.568 , 1109.827 , 1113.43 , 1.32828124e+08 , 6.65160380e+08 , 1. ), ( '2023-03-01T00:00:00' , 1122.233 , 1123.493 , 1116.62 , 1123.274 , 7.21718910e+07 , 3.71172850e+08 , 1. 
) ], dtype = [( 'frame' , ' BarsArray : \"\"\"\u4ece\u6301\u4e45\u5316\u6570\u636e\u5e93\u4e2d\u83b7\u53d6\u4ecb\u4e8e[`start`, `end`]\u95f4\u7684\u884c\u60c5\u8bb0\u5f55 Examples: ```python start = datetime.date(2022, 9, 1) # \u8d77\u59cb\u65f6\u95f4\uff0c \u53ef\u4fee\u6539 end = datetime.date(2023, 3, 1) # \u622a\u6b62\u65f6\u95f4\uff0c \u53ef\u4fee\u6539 board_code = '881128' # \u6c7d\u8f66\u670d\u52a1\uff0c \u53ef\u4fee\u6539 bars = await Board.get_bars_in_range(board_code, start, end) bars[-3:] # \u6253\u5370\u540e3\u6761\u6570\u636e # prints: rec.array([ ('2023-02-27T00:00:00', 1117.748, 1124.364, 1108.741, 1109.525, 1.77208600e+08, 1.13933095e+09, 1.), ('2023-02-28T00:00:00', 1112.246, 1119.568, 1109.827, 1113.43 , 1.32828124e+08, 6.65160380e+08, 1.), ('2023-03-01T00:00:00', 1122.233, 1123.493, 1116.62 , 1123.274, 7.21718910e+07, 3.71172850e+08, 1.) ], dtype=[('frame', ' List [ List ]: \"\"\"\u83b7\u53d6\u677f\u5757\u5217\u8868 Args: _btype: \u677f\u5757\u7c7b\u522b\uff0c\u53ef\u9009\u503c`BoardType.CONCEPT`\u548c`BoardType.INDUSTRY`. Returns: \u677f\u5757\u5217\u8868\u3002\u6bcf\u4e00\u4e2a\u5b50\u5143\u7d20\u4ecd\u4e3a\u4e00\u4e2a\u5217\u8868\uff0c\u7531\u677f\u5757\u4ee3\u7801(str), \u677f\u5757\u540d\u79f0(str)\u548c\u6210\u5458\u6570\u7ec4\u6210\u3002\u793a\u4f8b\uff1a ``` [ ['881101', '\u79cd\u690d\u4e1a\u4e0e\u6797\u4e1a', 24], ['881102', '\u517b\u6b96\u4e1a', 27], ['881103', '\u519c\u4ea7\u54c1\u52a0\u5de5', 41], ['881104', '\u519c\u4e1a\u670d\u52a1', 16], ] ``` \"\"\" rsp = await cls . _rpc_call ( \"board_list\" , { \"board_type\" : _btype . value }) if rsp [ \"rc\" ] != 200 : return { \"status\" : 500 , \"msg\" : \"httpx RPC call failed\" } return rsp [ \"data\" ] @classmethod async def fuzzy_match_board_name ( cls , pattern : str , _btype : BoardType = BoardType . CONCEPT ) -> dict : \"\"\"\u6a21\u7cca\u67e5\u8be2\u677f\u5757\u4ee3\u7801\u7684\u540d\u5b57 Examples: ```python await Board.fuzzy_match_board_name(\"\u6c7d\u8f66\", BoardType.INDUSTRY) # returns: [ '881125 \u6c7d\u8f66\u6574\u8f66', '881126 \u6c7d\u8f66\u96f6\u90e8\u4ef6', '881127 \u975e\u6c7d\u8f66\u4ea4\u8fd0', '881128 \u6c7d\u8f66\u670d\u52a1', '884107 \u6c7d\u8f66\u670d\u52a1\u2162', '884194 \u6c7d\u8f66\u96f6\u90e8\u4ef6\u2162' ] ``` Args: pattern: \u5f85\u67e5\u8be2\u6a21\u5f0f\u4e32 _btype: \u67e5\u8be2\u7c7b\u578b Returns: \u5305\u542b\u4ee5\u4e0bkey\u7684dict: code(\u677f\u5757\u4ee3\u7801), name\uff08\u677f\u5757\u540d\uff09, stocks(\u80a1\u7968\u6570) \"\"\" if not pattern : return [] rsp = await cls . _rpc_call ( \"fuzzy_match_name\" , { \"board_type\" : _btype . 
value , \"pattern\" : pattern } ) if rsp [ \"rc\" ] != 200 : return { \"status\" : 500 , \"msg\" : \"httpx RPC call failed\" } return rsp [ \"data\" ] @classmethod async def board_info_by_id ( cls , board_id : str , full_mode : bool = False ) -> dict : \"\"\"\u901a\u8fc7\u677f\u5757\u4ee3\u7801\u67e5\u8be2\u677f\u5757\u4fe1\u606f\uff08\u540d\u5b57\uff0c\u6210\u5458\u6570\u76ee\u6216\u6e05\u5355\uff09 Examples: ```python board_code = '881128' # \u6c7d\u8f66\u670d\u52a1 \u53ef\u81ea\u884c\u4fee\u6539 board_info = await Board.board_info_by_id(board_code) print(board_info) # \u5b57\u5178\u5f62\u5f0f # returns {'code': '881128', 'name': '\u6c7d\u8f66\u670d\u52a1', 'stocks': 14} ``` Returns: {'code': '301505', 'name': '\u533b\u7597\u5668\u68b0\u6982\u5ff5', 'stocks': 242} or {'code': '301505', 'name': '\u533b\u7597\u5668\u68b0\u6982\u5ff5', 'stocks': [['300916', '\u6717\u7279\u667a\u80fd'], ['300760', '\u8fc8\u745e\u533b\u7597']]} \"\"\" if not board_id : return {} if board_id [ 0 ] == \"3\" : _btype = BoardType . CONCEPT else : _btype = BoardType . INDUSTRY _mode = 0 if full_mode : # \u8f6c\u6362bool\u7c7b\u578b _mode = 1 rsp = await cls . _rpc_call ( \"info\" , { \"board_type\" : _btype . value , \"board_id\" : board_id , \"fullmode\" : _mode }, ) if rsp [ \"rc\" ] != 200 : return { \"status\" : 500 , \"msg\" : \"httpx RPC call failed\" } return rsp [ \"data\" ] @classmethod async def board_info_by_security ( cls , security : str , _btype : BoardType = BoardType . CONCEPT ) -> List [ dict ]: \"\"\"\u83b7\u53d6\u80a1\u7968\u6240\u5728\u677f\u5757\u4fe1\u606f\uff1a\u540d\u79f0\uff0c\u4ee3\u7801 Examples: ```python stock_code = '002236' # \u5927\u534e\u80a1\u4efd\uff0c\u80a1\u7968\u4ee3\u7801\u4e0d\u5e26\u5b57\u6bcd\u540e\u7f00 stock_in_board = await Board.board_info_by_security(stock_code, _btype=BoardType.CONCEPT) print(stock_in_board) # returns: [ {'code': '301715', 'name': '\u8bc1\u91d1\u6301\u80a1', 'stocks': 208}, {'code': '308870', 'name': '\u6570\u5b57\u7ecf\u6d4e', 'stocks': 195}, {'code': '308642', 'name': '\u6570\u636e\u4e2d\u5fc3', 'stocks': 188}, ..., {'code': '300008', 'name': '\u65b0\u80fd\u6e90\u6c7d\u8f66', 'stocks': 603} ] ``` Returns: [{'code': '301505', 'name': '\u533b\u7597\u5668\u68b0\u6982\u5ff5'}] \"\"\" if not security : return [] rsp = await cls . _rpc_call ( \"info_by_sec\" , { \"board_type\" : _btype . value , \"security\" : security } ) if rsp [ \"rc\" ] != 200 : return { \"status\" : 500 , \"msg\" : \"httpx RPC call failed\" } return rsp [ \"data\" ] @classmethod async def board_filter_members ( cls , included : List [ str ], excluded : List [ str ] = [], _btype : BoardType = BoardType . CONCEPT , ) -> List : \"\"\"\u6839\u636e\u677f\u5757\u540d\u7b5b\u9009\u80a1\u7968\uff0c\u53c2\u6570\u4e3ainclude, exclude Fixme: this function doesn't work Raise status 500 Returns: [['300181', '\u4f50\u529b\u836f\u4e1a'], ['600056', '\u4e2d\u56fd\u533b\u836f']] \"\"\" if not included : return [] if excluded is None : excluded = [] rsp = await cls . _rpc_call ( \"board_filter_members\" , { \"board_type\" : _btype . 
value , \"include_boards\" : included , \"exclude_boards\" : excluded , }, ) if rsp [ \"rc\" ] != 200 : return { \"status\" : 500 , \"msg\" : \"httpx RPC call failed\" } return rsp [ \"data\" ] @classmethod async def new_concept_boards ( cls , days : int = 10 ): raise NotImplementedError ( \"not ready\" ) @classmethod async def latest_concept_boards ( n : int = 3 ): raise NotImplementedError ( \"not ready\" ) @classmethod async def new_concept_members ( days : int = 10 , prot : int = None ): raise NotImplementedError ( \"not ready\" ) @classmethod async def board_filter ( cls , industry = None , with_concepts : Optional [ List [ str ]] = None , without = [] ): raise NotImplementedError ( \"not ready\" ) @classmethod async def save_bars ( cls , bars ): client = get_influx_client () logger . info ( \"persisting bars to influxdb: %s , %d secs\" , cls . measurement , len ( bars ) ) await client . save ( bars , cls . measurement , tag_keys = [ \"code\" ], time_key = \"frame\" ) return True @classmethod async def get_last_date_of_bars ( cls , code : str ): # \u884c\u4e1a\u677f\u5757\u56de\u6eaf1\u5e74\u7684\u6570\u636e\uff0c\u6982\u5ff5\u677f\u5757\u53ea\u53d6\u5f53\u5e74\u7684\u6570\u636e code = f \" { code } .THS\" client = get_influx_client () now = datetime . datetime . now () dt_end = tf . day_shift ( now , 0 ) # 250 + 60: \u53ef\u4ee5\u5f97\u523060\u4e2aMA250\u7684\u70b9, \u9ed8\u8ba4K\u7ebf\u56fe120\u4e2a\u8282\u70b9 dt_start = tf . day_shift ( now , - 310 ) flux = ( Flux () . measurement ( cls . measurement ) . range ( dt_start , dt_end ) . bucket ( client . _bucket ) . tags ({ \"code\" : code }) ) data = await client . query ( flux ) if len ( data ) == 2 : # \\r\\n return dt_start ds = DataframeDeserializer ( sort_values = \"_time\" , usecols = [ \"_time\" ], time_col = \"_time\" , engine = \"c\" ) bars = ds ( data ) secs = bars . to_records ( index = False ) . astype ( \"datetime64[s]\" ) _dt = secs [ - 1 ] . item () return _dt . date () @classmethod async def get_bars_in_range ( cls , code : str , start : Frame , end : Frame = None ) -> BarsArray : \"\"\"\u4ece\u6301\u4e45\u5316\u6570\u636e\u5e93\u4e2d\u83b7\u53d6\u4ecb\u4e8e[`start`, `end`]\u95f4\u7684\u884c\u60c5\u8bb0\u5f55 Examples: ```python start = datetime.date(2022, 9, 1) # \u8d77\u59cb\u65f6\u95f4\uff0c \u53ef\u4fee\u6539 end = datetime.date(2023, 3, 1) # \u622a\u6b62\u65f6\u95f4\uff0c \u53ef\u4fee\u6539 board_code = '881128' # \u6c7d\u8f66\u670d\u52a1\uff0c \u53ef\u4fee\u6539 bars = await Board.get_bars_in_range(board_code, start, end) bars[-3:] # \u6253\u5370\u540e3\u6761\u6570\u636e # prints: rec.array([ ('2023-02-27T00:00:00', 1117.748, 1124.364, 1108.741, 1109.525, 1.77208600e+08, 1.13933095e+09, 1.), ('2023-02-28T00:00:00', 1112.246, 1119.568, 1109.827, 1113.43 , 1.32828124e+08, 6.65160380e+08, 1.), ('2023-03-01T00:00:00', 1122.233, 1123.493, 1116.62 , 1123.274, 7.21718910e+07, 3.71172850e+08, 1.) ], dtype=[('frame', ' List : \"\"\"\u6839\u636e\u677f\u5757\u540d\u7b5b\u9009\u80a1\u7968\uff0c\u53c2\u6570\u4e3ainclude, exclude Fixme: this function doesn't work Raise status 500 Returns: [['300181', '\u4f50\u529b\u836f\u4e1a'], ['600056', '\u4e2d\u56fd\u533b\u836f']] \"\"\" if not included : return [] if excluded is None : excluded = [] rsp = await cls . _rpc_call ( \"board_filter_members\" , { \"board_type\" : _btype . 
value , \"include_boards\" : included , \"exclude_boards\" : excluded , }, ) if rsp [ \"rc\" ] != 200 : return { \"status\" : 500 , \"msg\" : \"httpx RPC call failed\" } return rsp [ \"data\" ]","title":"board_filter_members()"},{"location":"api/board/#omicron.models.board.Board.board_info_by_id","text":"\u901a\u8fc7\u677f\u5757\u4ee3\u7801\u67e5\u8be2\u677f\u5757\u4fe1\u606f\uff08\u540d\u5b57\uff0c\u6210\u5458\u6570\u76ee\u6216\u6e05\u5355\uff09 Examples: 1 2 3 4 5 6 board_code = '881128' # \u6c7d\u8f66\u670d\u52a1 \u53ef\u81ea\u884c\u4fee\u6539 board_info = await Board . board_info_by_id ( board_code ) print ( board_info ) # \u5b57\u5178\u5f62\u5f0f # returns { 'code' : '881128' , 'name' : '\u6c7d\u8f66\u670d\u52a1' , 'stocks' : 14 } Returns: Type Description {'code' '301505', 'name': '\u533b\u7597\u5668\u68b0\u6982\u5ff5', 'stocks': 242} or Source code in omicron/models/board.py @classmethod async def board_info_by_id ( cls , board_id : str , full_mode : bool = False ) -> dict : \"\"\"\u901a\u8fc7\u677f\u5757\u4ee3\u7801\u67e5\u8be2\u677f\u5757\u4fe1\u606f\uff08\u540d\u5b57\uff0c\u6210\u5458\u6570\u76ee\u6216\u6e05\u5355\uff09 Examples: ```python board_code = '881128' # \u6c7d\u8f66\u670d\u52a1 \u53ef\u81ea\u884c\u4fee\u6539 board_info = await Board.board_info_by_id(board_code) print(board_info) # \u5b57\u5178\u5f62\u5f0f # returns {'code': '881128', 'name': '\u6c7d\u8f66\u670d\u52a1', 'stocks': 14} ``` Returns: {'code': '301505', 'name': '\u533b\u7597\u5668\u68b0\u6982\u5ff5', 'stocks': 242} or {'code': '301505', 'name': '\u533b\u7597\u5668\u68b0\u6982\u5ff5', 'stocks': [['300916', '\u6717\u7279\u667a\u80fd'], ['300760', '\u8fc8\u745e\u533b\u7597']]} \"\"\" if not board_id : return {} if board_id [ 0 ] == \"3\" : _btype = BoardType . CONCEPT else : _btype = BoardType . INDUSTRY _mode = 0 if full_mode : # \u8f6c\u6362bool\u7c7b\u578b _mode = 1 rsp = await cls . _rpc_call ( \"info\" , { \"board_type\" : _btype . value , \"board_id\" : board_id , \"fullmode\" : _mode }, ) if rsp [ \"rc\" ] != 200 : return { \"status\" : 500 , \"msg\" : \"httpx RPC call failed\" } return rsp [ \"data\" ]","title":"board_info_by_id()"},{"location":"api/board/#omicron.models.board.Board.board_info_by_security","text":"\u83b7\u53d6\u80a1\u7968\u6240\u5728\u677f\u5757\u4fe1\u606f\uff1a\u540d\u79f0\uff0c\u4ee3\u7801 Examples: 1 2 3 4 5 6 7 8 9 10 11 12 stock_code = '002236' # \u5927\u534e\u80a1\u4efd\uff0c\u80a1\u7968\u4ee3\u7801\u4e0d\u5e26\u5b57\u6bcd\u540e\u7f00 stock_in_board = await Board . board_info_by_security ( stock_code , _btype = BoardType . CONCEPT ) print ( stock_in_board ) # returns: [ { 'code' : '301715' , 'name' : '\u8bc1\u91d1\u6301\u80a1' , 'stocks' : 208 }, { 'code' : '308870' , 'name' : '\u6570\u5b57\u7ecf\u6d4e' , 'stocks' : 195 }, { 'code' : '308642' , 'name' : '\u6570\u636e\u4e2d\u5fc3' , 'stocks' : 188 }, ... , { 'code' : '300008' , 'name' : '\u65b0\u80fd\u6e90\u6c7d\u8f66' , 'stocks' : 603 } ] Returns: Type Description [{'code' '301505', 'name': '\u533b\u7597\u5668\u68b0\u6982\u5ff5'}] Source code in omicron/models/board.py @classmethod async def board_info_by_security ( cls , security : str , _btype : BoardType = BoardType . 
CONCEPT ) -> List [ dict ]: \"\"\"\u83b7\u53d6\u80a1\u7968\u6240\u5728\u677f\u5757\u4fe1\u606f\uff1a\u540d\u79f0\uff0c\u4ee3\u7801 Examples: ```python stock_code = '002236' # \u5927\u534e\u80a1\u4efd\uff0c\u80a1\u7968\u4ee3\u7801\u4e0d\u5e26\u5b57\u6bcd\u540e\u7f00 stock_in_board = await Board.board_info_by_security(stock_code, _btype=BoardType.CONCEPT) print(stock_in_board) # returns: [ {'code': '301715', 'name': '\u8bc1\u91d1\u6301\u80a1', 'stocks': 208}, {'code': '308870', 'name': '\u6570\u5b57\u7ecf\u6d4e', 'stocks': 195}, {'code': '308642', 'name': '\u6570\u636e\u4e2d\u5fc3', 'stocks': 188}, ..., {'code': '300008', 'name': '\u65b0\u80fd\u6e90\u6c7d\u8f66', 'stocks': 603} ] ``` Returns: [{'code': '301505', 'name': '\u533b\u7597\u5668\u68b0\u6982\u5ff5'}] \"\"\" if not security : return [] rsp = await cls . _rpc_call ( \"info_by_sec\" , { \"board_type\" : _btype . value , \"security\" : security } ) if rsp [ \"rc\" ] != 200 : return { \"status\" : 500 , \"msg\" : \"httpx RPC call failed\" } return rsp [ \"data\" ]","title":"board_info_by_security()"},{"location":"api/board/#omicron.models.board.Board.board_list","text":"\u83b7\u53d6\u677f\u5757\u5217\u8868 Parameters: Name Type Description Default _btype BoardType \u677f\u5757\u7c7b\u522b\uff0c\u53ef\u9009\u503c BoardType.CONCEPT \u548c BoardType.INDUSTRY . Returns: Type Description List[List] \u677f\u5757\u5217\u8868\u3002\u6bcf\u4e00\u4e2a\u5b50\u5143\u7d20\u4ecd\u4e3a\u4e00\u4e2a\u5217\u8868\uff0c\u7531\u677f\u5757\u4ee3\u7801(str), \u677f\u5757\u540d\u79f0(str)\u548c\u6210\u5458\u6570\u7ec4\u6210\u3002\u793a\u4f8b\uff1a 1 2 3 4 5 6 [ ['881101', '\u79cd\u690d\u4e1a\u4e0e\u6797\u4e1a', 24], ['881102', '\u517b\u6b96\u4e1a', 27], ['881103', '\u519c\u4ea7\u54c1\u52a0\u5de5', 41], ['881104', '\u519c\u4e1a\u670d\u52a1', 16], ] Source code in omicron/models/board.py @classmethod async def board_list ( cls , _btype : BoardType = BoardType . CONCEPT ) -> List [ List ]: \"\"\"\u83b7\u53d6\u677f\u5757\u5217\u8868 Args: _btype: \u677f\u5757\u7c7b\u522b\uff0c\u53ef\u9009\u503c`BoardType.CONCEPT`\u548c`BoardType.INDUSTRY`. Returns: \u677f\u5757\u5217\u8868\u3002\u6bcf\u4e00\u4e2a\u5b50\u5143\u7d20\u4ecd\u4e3a\u4e00\u4e2a\u5217\u8868\uff0c\u7531\u677f\u5757\u4ee3\u7801(str), \u677f\u5757\u540d\u79f0(str)\u548c\u6210\u5458\u6570\u7ec4\u6210\u3002\u793a\u4f8b\uff1a ``` [ ['881101', '\u79cd\u690d\u4e1a\u4e0e\u6797\u4e1a', 24], ['881102', '\u517b\u6b96\u4e1a', 27], ['881103', '\u519c\u4ea7\u54c1\u52a0\u5de5', 41], ['881104', '\u519c\u4e1a\u670d\u52a1', 16], ] ``` \"\"\" rsp = await cls . _rpc_call ( \"board_list\" , { \"board_type\" : _btype . value }) if rsp [ \"rc\" ] != 200 : return { \"status\" : 500 , \"msg\" : \"httpx RPC call failed\" } return rsp [ \"data\" ]","title":"board_list()"},{"location":"api/board/#omicron.models.board.Board.fuzzy_match_board_name","text":"\u6a21\u7cca\u67e5\u8be2\u677f\u5757\u4ee3\u7801\u7684\u540d\u5b57 Examples: 1 2 3 4 5 6 7 8 9 10 11 await Board . fuzzy_match_board_name ( \"\u6c7d\u8f66\" , BoardType . 
INDUSTRY ) # returns: [ '881125 \u6c7d\u8f66\u6574\u8f66' , '881126 \u6c7d\u8f66\u96f6\u90e8\u4ef6' , '881127 \u975e\u6c7d\u8f66\u4ea4\u8fd0' , '881128 \u6c7d\u8f66\u670d\u52a1' , '884107 \u6c7d\u8f66\u670d\u52a1\u2162' , '884194 \u6c7d\u8f66\u96f6\u90e8\u4ef6\u2162' ] Parameters: Name Type Description Default pattern str \u5f85\u67e5\u8be2\u6a21\u5f0f\u4e32 required _btype BoardType \u67e5\u8be2\u7c7b\u578b Returns: Type Description \u5305\u542b\u4ee5\u4e0bkey\u7684dict code(\u677f\u5757\u4ee3\u7801), name\uff08\u677f\u5757\u540d\uff09, stocks(\u80a1\u7968\u6570) Source code in omicron/models/board.py @classmethod async def fuzzy_match_board_name ( cls , pattern : str , _btype : BoardType = BoardType . CONCEPT ) -> dict : \"\"\"\u6a21\u7cca\u67e5\u8be2\u677f\u5757\u4ee3\u7801\u7684\u540d\u5b57 Examples: ```python await Board.fuzzy_match_board_name(\"\u6c7d\u8f66\", BoardType.INDUSTRY) # returns: [ '881125 \u6c7d\u8f66\u6574\u8f66', '881126 \u6c7d\u8f66\u96f6\u90e8\u4ef6', '881127 \u975e\u6c7d\u8f66\u4ea4\u8fd0', '881128 \u6c7d\u8f66\u670d\u52a1', '884107 \u6c7d\u8f66\u670d\u52a1\u2162', '884194 \u6c7d\u8f66\u96f6\u90e8\u4ef6\u2162' ] ``` Args: pattern: \u5f85\u67e5\u8be2\u6a21\u5f0f\u4e32 _btype: \u67e5\u8be2\u7c7b\u578b Returns: \u5305\u542b\u4ee5\u4e0bkey\u7684dict: code(\u677f\u5757\u4ee3\u7801), name\uff08\u677f\u5757\u540d\uff09, stocks(\u80a1\u7968\u6570) \"\"\" if not pattern : return [] rsp = await cls . _rpc_call ( \"fuzzy_match_name\" , { \"board_type\" : _btype . value , \"pattern\" : pattern } ) if rsp [ \"rc\" ] != 200 : return { \"status\" : 500 , \"msg\" : \"httpx RPC call failed\" } return rsp [ \"data\" ]","title":"fuzzy_match_board_name()"},{"location":"api/board/#omicron.models.board.Board.get_bars_in_range","text":"\u4ece\u6301\u4e45\u5316\u6570\u636e\u5e93\u4e2d\u83b7\u53d6\u4ecb\u4e8e[ start , end ]\u95f4\u7684\u884c\u60c5\u8bb0\u5f55 Examples: 1 2 3 4 5 6 7 8 9 10 11 12 13 start = datetime . date ( 2022 , 9 , 1 ) # \u8d77\u59cb\u65f6\u95f4\uff0c \u53ef\u4fee\u6539 end = datetime . date ( 2023 , 3 , 1 ) # \u622a\u6b62\u65f6\u95f4\uff0c \u53ef\u4fee\u6539 board_code = '881128' # \u6c7d\u8f66\u670d\u52a1\uff0c \u53ef\u4fee\u6539 bars = await Board . get_bars_in_range ( board_code , start , end ) bars [ - 3 :] # \u6253\u5370\u540e3\u6761\u6570\u636e # prints: rec . array ([ ( '2023-02-27T00:00:00' , 1117.748 , 1124.364 , 1108.741 , 1109.525 , 1.77208600e+08 , 1.13933095e+09 , 1. ), ( '2023-02-28T00:00:00' , 1112.246 , 1119.568 , 1109.827 , 1113.43 , 1.32828124e+08 , 6.65160380e+08 , 1. ), ( '2023-03-01T00:00:00' , 1122.233 , 1123.493 , 1116.62 , 1123.274 , 7.21718910e+07 , 3.71172850e+08 , 1. ) ], dtype = [( 'frame' , ' BarsArray : \"\"\"\u4ece\u6301\u4e45\u5316\u6570\u636e\u5e93\u4e2d\u83b7\u53d6\u4ecb\u4e8e[`start`, `end`]\u95f4\u7684\u884c\u60c5\u8bb0\u5f55 Examples: ```python start = datetime.date(2022, 9, 1) # \u8d77\u59cb\u65f6\u95f4\uff0c \u53ef\u4fee\u6539 end = datetime.date(2023, 3, 1) # \u622a\u6b62\u65f6\u95f4\uff0c \u53ef\u4fee\u6539 board_code = '881128' # \u6c7d\u8f66\u670d\u52a1\uff0c \u53ef\u4fee\u6539 bars = await Board.get_bars_in_range(board_code, start, end) bars[-3:] # \u6253\u5370\u540e3\u6761\u6570\u636e # prints: rec.array([ ('2023-02-27T00:00:00', 1117.748, 1124.364, 1108.741, 1109.525, 1.77208600e+08, 1.13933095e+09, 1.), ('2023-02-28T00:00:00', 1112.246, 1119.568, 1109.827, 1113.43 , 1.32828124e+08, 6.65160380e+08, 1.), ('2023-03-01T00:00:00', 1122.233, 1123.493, 1116.62 , 1123.274, 7.21718910e+07, 3.71172850e+08, 1.) 
], dtype=[('frame', ' bool : \"\"\"\u5224\u65ad\u80a1\u4ef7\u662f\u5426\u76f8\u7b49 Args: x : \u4ef7\u683c1 y : \u4ef7\u683c2 Returns: \u5982\u679c\u76f8\u7b49\u5219\u8fd4\u56deTrue\uff0c\u5426\u5219\u8fd4\u56deFalse \"\"\" return abs ( math_round ( x , 2 ) - math_round ( y , 2 )) < 1e-2 np \u00b6 Extension function related to numpy array_math_round ( arr , digits ) \u00b6 \u5c06\u4e00\u7ef4\u6570\u7ec4arr\u7684\u6570\u636e\u8fdb\u884c\u56db\u820d\u4e94\u5165 numpy.around\u7684\u51fd\u6570\u5e76\u4e0d\u662f\u6570\u5b66\u4e0a\u7684\u56db\u820d\u4e94\u5165\uff0c\u5bf91.5\u548c2.5\u8fdb\u884cround\u7684\u7ed3\u679c\u90fd\u4f1a\u53d8\u62102\uff0c\u5728\u91d1\u878d\u9886\u57df\u8ba1\u7b97\u4e2d\uff0c\u6211\u4eec\u5fc5\u987b\u4f7f\u7528\u6570\u5b66\u610f\u4e49\u4e0a\u7684\u56db\u820d\u4e94\u5165\u3002 Parameters: Name Type Description Default arr ArrayLike \u8f93\u5165\u6570\u7ec4 required digits int required Returns: Type Description np.ndarray \u56db\u820d\u4e94\u5165\u540e\u7684\u4e00\u7ef4\u6570\u7ec4 Source code in omicron/extensions/np.py def array_math_round ( arr : Union [ float , ArrayLike ], digits : int ) -> np . ndarray : \"\"\"\u5c06\u4e00\u7ef4\u6570\u7ec4arr\u7684\u6570\u636e\u8fdb\u884c\u56db\u820d\u4e94\u5165 numpy.around\u7684\u51fd\u6570\u5e76\u4e0d\u662f\u6570\u5b66\u4e0a\u7684\u56db\u820d\u4e94\u5165\uff0c\u5bf91.5\u548c2.5\u8fdb\u884cround\u7684\u7ed3\u679c\u90fd\u4f1a\u53d8\u62102\uff0c\u5728\u91d1\u878d\u9886\u57df\u8ba1\u7b97\u4e2d\uff0c\u6211\u4eec\u5fc5\u987b\u4f7f\u7528\u6570\u5b66\u610f\u4e49\u4e0a\u7684\u56db\u820d\u4e94\u5165\u3002 Args: arr (ArrayLike): \u8f93\u5165\u6570\u7ec4 digits (int): Returns: np.ndarray: \u56db\u820d\u4e94\u5165\u540e\u7684\u4e00\u7ef4\u6570\u7ec4 \"\"\" # \u5982\u679c\u662f\u5355\u4e2a\u5143\u7d20\uff0c\u5219\u76f4\u63a5\u8fd4\u56de if isinstance ( arr , float ): return decimals . math_round ( arr , digits ) f = np . vectorize ( lambda x : decimals . math_round ( x , digits )) return f ( arr ) array_price_equal ( price1 , price2 ) \u00b6 \u5224\u65ad\u4e24\u4e2a\u4ef7\u683c\u6570\u7ec4\u662f\u5426\u76f8\u7b49 Parameters: Name Type Description Default price1 ArrayLike \u4ef7\u683c\u6570\u7ec4 required price2 ArrayLike \u4ef7\u683c\u6570\u7ec4 required Returns: Type Description np.ndarray \u5224\u65ad\u7ed3\u679c Source code in omicron/extensions/np.py def array_price_equal ( price1 : ArrayLike , price2 : ArrayLike ) -> np . ndarray : \"\"\"\u5224\u65ad\u4e24\u4e2a\u4ef7\u683c\u6570\u7ec4\u662f\u5426\u76f8\u7b49 Args: price1 (ArrayLike): \u4ef7\u683c\u6570\u7ec4 price2 (ArrayLike): \u4ef7\u683c\u6570\u7ec4 Returns: np.ndarray: \u5224\u65ad\u7ed3\u679c \"\"\" price1 = array_math_round ( price1 , 2 ) price2 = array_math_round ( price2 , 2 ) return abs ( price1 - price2 ) < 1e-2 bars_since ( condition , default = None ) \u00b6 Return the number of bars since condition sequence was last True , or if never, return default . 1 2 3 >>> condition = [True, True, False] >>> bars_since(condition) 1 Source code in omicron/extensions/np.py def bars_since ( condition : Sequence [ bool ], default = None ) -> int : \"\"\" Return the number of bars since `condition` sequence was last `True`, or if never, return `default`. 
>>> condition = [True, True, False] >>> bars_since(condition) 1 \"\"\" return next ( compress ( range ( len ( condition )), reversed ( condition )), default ) bin_cut ( arr , n ) \u00b6 \u5c06\u6570\u7ec4arr\u5207\u5206\u6210n\u4efd todo: use padding + reshape to boost performance Parameters: Name Type Description Default arr [type] [description] required n [type] [description] required Returns: Type Description [type] [description] Source code in omicron/extensions/np.py def bin_cut ( arr : list , n : int ): \"\"\"\u5c06\u6570\u7ec4arr\u5207\u5206\u6210n\u4efd todo: use padding + reshape to boost performance Args: arr ([type]): [description] n ([type]): [description] Returns: [type]: [description] \"\"\" result = [[] for i in range ( n )] for i , e in enumerate ( arr ): result [ i % n ] . append ( e ) return [ e for e in result if len ( e )] count_between ( arr , start , end ) \u00b6 \u8ba1\u7b97\u6570\u7ec4\u4e2d\uff0c start \u5143\u7d20\u4e0e end \u5143\u7d20\u4e4b\u95f4\u5171\u6709\u591a\u5c11\u4e2a\u5143\u7d20 \u8981\u6c42arr\u5fc5\u987b\u662f\u5df2\u6392\u5e8f\u3002\u8ba1\u7b97\u7ed3\u679c\u4f1a\u5305\u542b\u533a\u95f4\u8fb9\u754c\u70b9\u3002 Examples: >>> arr = [ 20050104 , 20050105 , 20050106 , 20050107 , 20050110 , 20050111 ] >>> count_between ( arr , 20050104 , 20050111 ) 6 >>> count_between ( arr , 20050104 , 20050109 ) 4 Source code in omicron/extensions/np.py def count_between ( arr , start , end ): \"\"\"\u8ba1\u7b97\u6570\u7ec4\u4e2d\uff0c`start`\u5143\u7d20\u4e0e`end`\u5143\u7d20\u4e4b\u95f4\u5171\u6709\u591a\u5c11\u4e2a\u5143\u7d20 \u8981\u6c42arr\u5fc5\u987b\u662f\u5df2\u6392\u5e8f\u3002\u8ba1\u7b97\u7ed3\u679c\u4f1a\u5305\u542b\u533a\u95f4\u8fb9\u754c\u70b9\u3002 Examples: >>> arr = [20050104, 20050105, 20050106, 20050107, 20050110, 20050111] >>> count_between(arr, 20050104, 20050111) 6 >>> count_between(arr, 20050104, 20050109) 4 \"\"\" pos_start = np . searchsorted ( arr , start , side = \"right\" ) pos_end = np . searchsorted ( arr , end , side = \"right\" ) counter = pos_end - pos_start + 1 if start < arr [ 0 ]: counter -= 1 if end > arr [ - 1 ]: counter -= 1 return counter dataframe_to_structured_array ( df , dtypes = None ) \u00b6 convert dataframe (with all columns, and index possibly) to numpy structured arrays len(dtypes) should be either equal to len(df.columns) or len(df.columns) + 1 . In the later case, it implies to include df.index into converted array. Parameters: Name Type Description Default df DataFrame the one needs to be converted required dtypes List[Tuple] Defaults to None. If it's None , then dtypes of df is used, in such case, the index of df will not be converted. None Returns: Type Description ArrayLike [description] Source code in omicron/extensions/np.py def dataframe_to_structured_array ( df : DataFrame , dtypes : List [ Tuple ] = None ) -> ArrayLike : \"\"\"convert dataframe (with all columns, and index possibly) to numpy structured arrays `len(dtypes)` should be either equal to `len(df.columns)` or `len(df.columns) + 1`. In the later case, it implies to include `df.index` into converted array. Args: df: the one needs to be converted dtypes: Defaults to None. If it's `None`, then dtypes of `df` is used, in such case, the `index` of `df` will not be converted. Returns: ArrayLike: [description] \"\"\" v = df if dtypes is not None : dtypes_in_dict = { key : value for key , value in dtypes } col_len = len ( df . columns ) if len ( dtypes ) == col_len + 1 : v = df . reset_index () rename_index_to = set ( dtypes_in_dict . keys ()) . 
difference ( set ( df . columns )) v . rename ( columns = { \"index\" : list ( rename_index_to )[ 0 ]}, inplace = True ) elif col_len != len ( dtypes ): raise ValueError ( f \"length of dtypes should be either { col_len } or { col_len + 1 } , is { len ( dtypes ) } \" ) # re-arrange order of dtypes, in order to align with df.columns dtypes = [] for name in v . columns : dtypes . append (( name , dtypes_in_dict [ name ])) else : dtypes = df . dtypes return np . array ( np . rec . fromrecords ( v . values ), dtype = dtypes ) dict_to_numpy_array ( d , dtype ) \u00b6 convert dictionary to numpy array Examples: d = {\"aaron\": 5, \"jack\": 6} dtype = [(\"name\", \"S8\"), (\"score\", \" np . array : \"\"\"convert dictionary to numpy array Examples: >>> d = {\"aaron\": 5, \"jack\": 6} >>> dtype = [(\"name\", \"S8\"), (\"score\", \">> dict_to_numpy_array(d, dtype) array([(b'aaron', 5), (b'jack', 6)], dtype=[('name', 'S8'), ('score', '>> arr = np . arange ( 6 , dtype = np . float32 ) >>> arr [ 3 : 5 ] = np . NaN >>> fill_nan ( arr ) ... array ([ 0. , 1. , 2. , 2. , 2. , 5. ], dtype = float32 ) >>> arr = np . arange ( 6 , dtype = np . float32 ) >>> arr [ 0 : 2 ] = np . nan >>> fill_nan ( arr ) ... array ([ 2. , 2. , 2. , 3. , 4. , 5. ], dtype = float32 ) Parameters: Name Type Description Default ts np.array [description] required Source code in omicron/extensions/np.py def fill_nan ( ts : np . ndarray ): \"\"\"\u5c06ts\u4e2d\u7684NaN\u66ff\u6362\u4e3a\u5176\u524d\u503c \u5982\u679cts\u8d77\u5934\u7684\u5143\u7d20\u4e3aNaN\uff0c\u5219\u7528\u7b2c\u4e00\u4e2a\u975eNaN\u5143\u7d20\u66ff\u6362\u3002 \u5982\u679c\u6240\u6709\u5143\u7d20\u90fd\u4e3aNaN\uff0c\u5219\u65e0\u6cd5\u66ff\u6362\u3002 Example: >>> arr = np.arange(6, dtype=np.float32) >>> arr[3:5] = np.NaN >>> fill_nan(arr) ... # doctest: +NORMALIZE_WHITESPACE array([0., 1., 2., 2., 2., 5.], dtype=float32) >>> arr = np.arange(6, dtype=np.float32) >>> arr[0:2] = np.nan >>> fill_nan(arr) ... # doctest: +NORMALIZE_WHITESPACE array([2., 2., 2., 3., 4., 5.], dtype=float32) Args: ts (np.array): [description] \"\"\" if np . all ( np . isnan ( ts )): raise ValueError ( \"all of ts are NaN\" ) if ts [ 0 ] is None or math . isnan ( ts [ 0 ]): idx = np . argwhere ( ~ np . isnan ( ts ))[ 0 ] ts [ 0 ] = ts [ idx ] mask = np . isnan ( ts ) idx = np . where ( ~ mask , np . arange ( mask . size ), 0 ) np . maximum . accumulate ( idx , out = idx ) return ts [ idx ] find_runs ( x ) \u00b6 Find runs of consecutive items in an array. Parameters: Name Type Description Default x ArrayLike the sequence to find runs in required Returns: Type Description Tuple[np.ndarray, np.ndarray, np.ndarray] A tuple of unique values, start indices, and length of runs Source code in omicron/extensions/np.py def find_runs ( x : ArrayLike ) -> Tuple [ np . ndarray , np . ndarray , np . ndarray ]: \"\"\"Find runs of consecutive items in an array. Args: x: the sequence to find runs in Returns: A tuple of unique values, start indices, and length of runs \"\"\" # ensure array x = np . asanyarray ( x ) if x . ndim != 1 : raise ValueError ( \"only 1D array supported\" ) n = x . shape [ 0 ] # handle empty array if n == 0 : return np . array ([]), np . array ([]), np . array ([]) else : # find run starts loc_run_start = np . empty ( n , dtype = bool ) loc_run_start [ 0 ] = True np . not_equal ( x [: - 1 ], x [ 1 :], out = loc_run_start [ 1 :]) run_starts = np . nonzero ( loc_run_start )[ 0 ] # find run values run_values = x [ loc_run_start ] # find run lengths run_lengths = np . diff ( np . 
append ( run_starts , n )) return run_values , run_starts , run_lengths floor ( arr , item ) \u00b6 \u5728\u6570\u636earr\u4e2d\uff0c\u627e\u5230\u5c0f\u4e8e\u7b49\u4e8eitem\u7684\u90a3\u4e00\u4e2a\u503c\u3002\u5982\u679citem\u5c0f\u4e8e\u6240\u6709arr\u5143\u7d20\u7684\u503c\uff0c\u8fd4\u56dearr[0];\u5982\u679citem \u5927\u4e8e\u6240\u6709arr\u5143\u7d20\u7684\u503c\uff0c\u8fd4\u56dearr[-1] \u4e0e minute_frames_floor \u4e0d\u540c\u7684\u662f\uff0c\u672c\u51fd\u6570\u4e0d\u505a\u56de\u7ed5\u4e0e\u8fdb\u4f4d. Examples: >>> a = [ 3 , 6 , 9 ] >>> floor ( a , - 1 ) 3 >>> floor ( a , 9 ) 9 >>> floor ( a , 10 ) 9 >>> floor ( a , 4 ) 3 >>> floor ( a , 10 ) 9 Parameters: Name Type Description Default arr required item required Source code in omicron/extensions/np.py def floor ( arr , item ): \"\"\" \u5728\u6570\u636earr\u4e2d\uff0c\u627e\u5230\u5c0f\u4e8e\u7b49\u4e8eitem\u7684\u90a3\u4e00\u4e2a\u503c\u3002\u5982\u679citem\u5c0f\u4e8e\u6240\u6709arr\u5143\u7d20\u7684\u503c\uff0c\u8fd4\u56dearr[0];\u5982\u679citem \u5927\u4e8e\u6240\u6709arr\u5143\u7d20\u7684\u503c\uff0c\u8fd4\u56dearr[-1] \u4e0e`minute_frames_floor`\u4e0d\u540c\u7684\u662f\uff0c\u672c\u51fd\u6570\u4e0d\u505a\u56de\u7ed5\u4e0e\u8fdb\u4f4d. Examples: >>> a = [3, 6, 9] >>> floor(a, -1) 3 >>> floor(a, 9) 9 >>> floor(a, 10) 9 >>> floor(a, 4) 3 >>> floor(a,10) 9 Args: arr: item: Returns: \"\"\" if item < arr [ 0 ]: return arr [ 0 ] index = np . searchsorted ( arr , item , side = \"right\" ) return arr [ index - 1 ] join_by_left ( key , r1 , r2 , mask = True ) \u00b6 \u5de6\u8fde\u63a5 r1 , r2 by key \u5982\u679c r1 \u4e2d\u5b58\u5728 r2 \u4e2d\u6ca1\u6709\u7684\u884c\uff0c\u5219\u8be5\u884c\u5bf9\u5e94\u7684 r2 \u4e2d\u7684\u90a3\u4e9b\u5b57\u6bb5\u5c06\u88abmask\uff0c\u6216\u8005\u586b\u5145\u968f\u673a\u6570\u3002 same as numpy.lib.recfunctions.join_by(key, r1, r2, jointype='leftouter'), but allows r1 have duplicate keys Reference: stackoverflow Examples: >>> # to join the following >>> # [[ 1, 2], >>> # [ 1, 3], x [[1, 5], >>> # [ 2, 3]] [4, 7]] >>> # only first two rows in left will be joined >>> r1 = np . array ([( 1 , 2 ), ( 1 , 3 ), ( 2 , 3 )], dtype = [( 'seq' , 'i4' ), ( 'score' , 'i4' )]) >>> r2 = np . array ([( 1 , 5 ), ( 4 , 7 )], dtype = [( 'seq' , 'i4' ), ( 'age' , 'i4' )]) >>> joined = join_by_left ( 'seq' , r1 , r2 ) >>> print ( joined ) [( 1 , 2 , 5 ) ( 1 , 3 , 5 ) ( 2 , 3 , -- )] >>> print ( joined . dtype ) ( numpy . record , [( 'seq' , '>> joined [ 2 ][ 2 ] masked >>> joined . 
tolist ()[ 2 ][ 2 ] == None True Parameters: Name Type Description Default key join\u5173\u952e\u5b57 required r1 \u6570\u636e\u96c61 required r2 \u6570\u636e\u96c62 required Returns: Type Description a numpy array Source code in omicron/extensions/np.py def join_by_left ( key , r1 , r2 , mask = True ): \"\"\"\u5de6\u8fde\u63a5 `r1`, `r2` by `key` \u5982\u679c`r1`\u4e2d\u5b58\u5728`r2`\u4e2d\u6ca1\u6709\u7684\u884c\uff0c\u5219\u8be5\u884c\u5bf9\u5e94\u7684`r2`\u4e2d\u7684\u90a3\u4e9b\u5b57\u6bb5\u5c06\u88abmask\uff0c\u6216\u8005\u586b\u5145\u968f\u673a\u6570\u3002 same as numpy.lib.recfunctions.join_by(key, r1, r2, jointype='leftouter'), but allows r1 have duplicate keys [Reference: stackoverflow](https://stackoverflow.com/a/53261882/13395693) Examples: >>> # to join the following >>> # [[ 1, 2], >>> # [ 1, 3], x [[1, 5], >>> # [ 2, 3]] [4, 7]] >>> # only first two rows in left will be joined >>> r1 = np.array([(1, 2), (1,3), (2,3)], dtype=[('seq', 'i4'), ('score', 'i4')]) >>> r2 = np.array([(1, 5), (4,7)], dtype=[('seq', 'i4'), ('age', 'i4')]) >>> joined = join_by_left('seq', r1, r2) >>> print(joined) [(1, 2, 5) (1, 3, 5) (2, 3, --)] >>> print(joined.dtype) (numpy.record, [('seq', '>> joined[2][2] masked >>> joined.tolist()[2][2] == None True Args: key : join\u5173\u952e\u5b57 r1 : \u6570\u636e\u96c61 r2 : \u6570\u636e\u96c62 Returns: a numpy array \"\"\" # figure out the dtype of the result array descr1 = r1 . dtype . descr descr2 = [ d for d in r2 . dtype . descr if d [ 0 ] not in r1 . dtype . names ] descrm = descr1 + descr2 # figure out the fields we'll need from each array f1 = [ d [ 0 ] for d in descr1 ] f2 = [ d [ 0 ] for d in descr2 ] # cache the number of columns in f1 ncol1 = len ( f1 ) # get a dict of the rows of r2 grouped by key rows2 = {} for row2 in r2 : rows2 . setdefault ( row2 [ key ], []) . append ( row2 ) # figure out how many rows will be in the result nrowm = 0 for k1 in r1 [ key ]: if k1 in rows2 : nrowm += len ( rows2 [ k1 ]) else : nrowm += 1 # allocate the return array # ret = np.full((nrowm, ), fill, dtype=descrm) _ret = np . recarray ( nrowm , dtype = descrm ) if mask : ret = np . ma . array ( _ret , mask = True ) else : ret = _ret # merge the data into the return array i = 0 for row1 in r1 : if row1 [ key ] in rows2 : for row2 in rows2 [ row1 [ key ]]: ret [ i ] = tuple ( row1 [ f1 ]) + tuple ( row2 [ f2 ]) i += 1 else : for j in range ( ncol1 ): ret [ i ][ j ] = row1 [ j ] i += 1 return ret numpy_append_fields ( base , names , data , dtypes ) \u00b6 \u7ed9\u73b0\u6709\u7684\u6570\u7ec4 base \u589e\u52a0\u65b0\u7684\u5b57\u6bb5 \u5b9e\u73b0\u4e86 numpy.lib.recfunctions.rec_append_fields \u7684\u529f\u80fd\u3002\u63d0\u4f9b\u8fd9\u4e2a\u529f\u80fd\uff0c\u662f\u56e0\u4e3a rec_append_fields \u4e0d\u80fd\u5904\u7406 data \u5143\u7d20\u7684\u7c7b\u578b\u4e3aObject\u7684\u60c5\u51b5\u3002 \u65b0\u589e\u7684\u6570\u636e\u5217\u5c06\u987a\u5e8f\u6392\u5217\u5728\u5176\u5b83\u5217\u7684\u53f3\u8fb9\u3002 Examples: >>> # \u65b0\u589e\u5355\u4e2a\u5b57\u6bb5 >>> import numpy >>> old = np . array ([ i for i in range ( 3 )], dtype = [( 'col1' , '>> new_list = [ 2 * i for i in range ( 3 )] >>> res = numpy_append_fields ( old , 'new_col' , new_list , [( 'new_col' , '>> print ( res ) ... [( 0. , 0. ) ( 1. , 2. ) ( 2. , 4. )] >>> # \u65b0\u589e\u591a\u4e2a\u5b57\u6bb5 >>> data = [ res [ 'col1' ] . tolist (), res [ 'new_col' ] . tolist ()] >>> print ( numpy_append_fields ( old , ( 'col3' , 'col4' ), data , [( 'col3' , ' np . 
ndarray : \"\"\"\u7ed9\u73b0\u6709\u7684\u6570\u7ec4`base`\u589e\u52a0\u65b0\u7684\u5b57\u6bb5 \u5b9e\u73b0\u4e86`numpy.lib.recfunctions.rec_append_fields`\u7684\u529f\u80fd\u3002\u63d0\u4f9b\u8fd9\u4e2a\u529f\u80fd\uff0c\u662f\u56e0\u4e3a`rec_append_fields`\u4e0d\u80fd\u5904\u7406`data`\u5143\u7d20\u7684\u7c7b\u578b\u4e3aObject\u7684\u60c5\u51b5\u3002 \u65b0\u589e\u7684\u6570\u636e\u5217\u5c06\u987a\u5e8f\u6392\u5217\u5728\u5176\u5b83\u5217\u7684\u53f3\u8fb9\u3002 Example: >>> # \u65b0\u589e\u5355\u4e2a\u5b57\u6bb5 >>> import numpy >>> old = np.array([i for i in range(3)], dtype=[('col1', '>> new_list = [2 * i for i in range(3)] >>> res = numpy_append_fields(old, 'new_col', new_list, [('new_col', '>> print(res) ... # doctest: +NORMALIZE_WHITESPACE [(0., 0.) (1., 2.) (2., 4.)] >>> # \u65b0\u589e\u591a\u4e2a\u5b57\u6bb5 >>> data = [res['col1'].tolist(), res['new_col'].tolist()] >>> print(numpy_append_fields(old, ('col3', 'col4'), data, [('col3', ' np . ndarray : \"\"\"\u4ece`ts`\u4e2d\u53bb\u9664NaN Args: ts (np.array): [description] Returns: np.array: [description] \"\"\" return ts [ ~ np . isnan ( ts . astype ( float ))] replace_zero ( ts , replacement = None ) \u00b6 \u5c06ts\u4e2d\u76840\u66ff\u6362\u4e3a\u524d\u503c, \u5904\u7406volume\u6570\u636e\u65f6\u5e38\u7528\u7528\u5230 \u5982\u679c\u63d0\u4f9b\u4e86replacement, \u5219\u66ff\u6362\u4e3areplacement Source code in omicron/extensions/np.py def replace_zero ( ts : np . ndarray , replacement = None ) -> np . ndarray : \"\"\"\u5c06ts\u4e2d\u76840\u66ff\u6362\u4e3a\u524d\u503c, \u5904\u7406volume\u6570\u636e\u65f6\u5e38\u7528\u7528\u5230 \u5982\u679c\u63d0\u4f9b\u4e86replacement, \u5219\u66ff\u6362\u4e3areplacement \"\"\" if replacement is not None : return np . where ( ts == 0 , replacement , ts ) if np . all ( ts == 0 ): raise ValueError ( \"all of ts are 0\" ) if ts [ 0 ] == 0 : idx = np . argwhere ( ts != 0 )[ 0 ] ts [ 0 ] = ts [ idx ] mask = ts == 0 idx = np . where ( ~ mask , np . arange ( mask . size ), 0 ) np . maximum . accumulate ( idx , out = idx ) return ts [ idx ] rolling ( x , win , func ) \u00b6 \u5bf9\u5e8f\u5217 x \u8fdb\u884c\u7a97\u53e3\u6ed1\u52a8\u8ba1\u7b97\u3002 \u5982\u679c func \u8981\u5b9e\u73b0\u7684\u529f\u80fd\u662fargmax, argmin, max, mean, median, min, rank, std, sum, var\u7b49\uff0cmove_argmax\uff0c\u8bf7\u4f7f\u7528bottleneck\u4e2d\u7684move_argmin, move_max, move_mean, move_median, move_min move_rank, move_std, move_sum, move_var\u3002\u8fd9\u4e9b\u51fd\u6570\u7684\u6027\u80fd\u66f4\u597d\u3002 Parameters: Name Type Description Default x [type] [description] required win [type] [description] required func [type] [description] required Returns: Type Description [type] [description] Source code in omicron/extensions/np.py def rolling ( x , win , func ): \"\"\"\u5bf9\u5e8f\u5217`x`\u8fdb\u884c\u7a97\u53e3\u6ed1\u52a8\u8ba1\u7b97\u3002 \u5982\u679c`func`\u8981\u5b9e\u73b0\u7684\u529f\u80fd\u662fargmax, argmin, max, mean, median, min, rank, std, sum, var\u7b49\uff0cmove_argmax\uff0c\u8bf7\u4f7f\u7528bottleneck\u4e2d\u7684move_argmin, move_max, move_mean, move_median, move_min move_rank, move_std, move_sum, move_var\u3002\u8fd9\u4e9b\u51fd\u6570\u7684\u6027\u80fd\u66f4\u597d\u3002 Args: x ([type]): [description] win ([type]): [description] func ([type]): [description] Returns: [type]: [description] \"\"\" results = [] for subarray in sliding_window_view ( x , window_shape = win ): results . append ( func ( subarray )) return np . 
array ( results ) shift ( arr , start , offset ) \u00b6 \u5728numpy\u6570\u7ec4arr\u4e2d\uff0c\u627e\u5230start(\u6216\u8005\u6700\u63a5\u8fd1\u7684\u4e00\u4e2a\uff09\uff0c\u53d6offset\u5bf9\u5e94\u7684\u5143\u7d20\u3002 \u8981\u6c42 arr \u5df2\u6392\u5e8f\u3002 offset \u4e3a\u6b63\uff0c\u8868\u660e\u5411\u540e\u79fb\u4f4d\uff1b offset \u4e3a\u8d1f\uff0c\u8868\u660e\u5411\u524d\u79fb\u4f4d Examples: >>> arr = [ 20050104 , 20050105 , 20050106 , 20050107 , 20050110 , 20050111 ] >>> shift ( arr , 20050104 , 1 ) 20050105 >>> shift ( arr , 20050105 , - 1 ) 20050104 >>> # \u8d77\u59cb\u70b9\u5df2\u53f3\u8d8a\u754c\uff0c\u4e14\u5411\u53f3shift\uff0c\u8fd4\u56de\u8d77\u59cb\u70b9 >>> shift ( arr , 20050120 , 1 ) 20050120 Parameters: Name Type Description Default arr \u5df2\u6392\u5e8f\u7684\u6570\u7ec4 required start numpy\u53ef\u63a5\u53d7\u7684\u6570\u636e\u7c7b\u578b required offset int [description] required Returns: Type Description \u79fb\u4f4d\u540e\u5f97\u5230\u7684\u5143\u7d20\u503c Source code in omicron/extensions/np.py def shift ( arr , start , offset ): \"\"\"\u5728numpy\u6570\u7ec4arr\u4e2d\uff0c\u627e\u5230start(\u6216\u8005\u6700\u63a5\u8fd1\u7684\u4e00\u4e2a\uff09\uff0c\u53d6offset\u5bf9\u5e94\u7684\u5143\u7d20\u3002 \u8981\u6c42`arr`\u5df2\u6392\u5e8f\u3002`offset`\u4e3a\u6b63\uff0c\u8868\u660e\u5411\u540e\u79fb\u4f4d\uff1b`offset`\u4e3a\u8d1f\uff0c\u8868\u660e\u5411\u524d\u79fb\u4f4d Examples: >>> arr = [20050104, 20050105, 20050106, 20050107, 20050110, 20050111] >>> shift(arr, 20050104, 1) 20050105 >>> shift(arr, 20050105, -1) 20050104 >>> # \u8d77\u59cb\u70b9\u5df2\u53f3\u8d8a\u754c\uff0c\u4e14\u5411\u53f3shift\uff0c\u8fd4\u56de\u8d77\u59cb\u70b9 >>> shift(arr, 20050120, 1) 20050120 Args: arr : \u5df2\u6392\u5e8f\u7684\u6570\u7ec4 start : numpy\u53ef\u63a5\u53d7\u7684\u6570\u636e\u7c7b\u578b offset (int): [description] Returns: \u79fb\u4f4d\u540e\u5f97\u5230\u7684\u5143\u7d20\u503c \"\"\" pos = np . searchsorted ( arr , start , side = \"right\" ) if pos + offset - 1 >= len ( arr ): return start else : return arr [ pos + offset - 1 ] smallest_n_argpos ( ts , n ) \u00b6 get smallest n (min->max) elements and return argpos which its value ordered in ascent Examples: >>> smallest_n_argpos ([ np . nan , 4 , 3 , 9 , 8 , 5 , 2 , 1 , 0 , 6 , 7 ], 2 ) array ([ 8 , 7 ]) Parameters: Name Type Description Default ts np.array \u8f93\u5165\u7684\u6570\u7ec4 required n int \u53d6\u6700\u5c0f\u7684n\u4e2a\u5143\u7d20 required Returns: Type Description np.array [description] Source code in omicron/extensions/np.py def smallest_n_argpos ( ts : np . array , n : int ) -> np . array : \"\"\"get smallest n (min->max) elements and return argpos which its value ordered in ascent Example: >>> smallest_n_argpos([np.nan, 4, 3, 9, 8, 5, 2, 1, 0, 6, 7], 2) array([8, 7]) Args: ts (np.array): \u8f93\u5165\u7684\u6570\u7ec4 n (int): \u53d6\u6700\u5c0f\u7684n\u4e2a\u5143\u7d20 Returns: np.array: [description] \"\"\" return np . 
argsort ( ts )[: n ] to_pydatetime ( tm ) \u00b6 \u5c06numpy.datetime64\u5bf9\u8c61\u8f6c\u6362\u6210\u4e3apython\u7684datetime\u5bf9\u8c61 numpy.ndarray.item()\u65b9\u6cd5\u53ef\u7528\u4ee5\u5c06\u4efb\u4f55numpy\u5bf9\u8c61\u8f6c\u6362\u6210python\u5bf9\u8c61\uff0c\u63a8\u8350\u5728\u4efb\u4f55\u9002\u7528\u7684\u5730\u65b9\u4f7f\u7528.item()\u65b9\u6cd5\uff0c\u800c\u4e0d\u662f\u672c\u65b9\u6cd5\u3002\u793a\u4f8b: 1 2 3 4 arr = np.array(['2022-09-08', '2022-09-09'], dtype='datetime64[s]') arr.item(0) # output is datetime.datetime(2022, 9, 8, 0, 0) arr[1].item() # output is datetime.datetime(2022, 9, 9, 0, 0) Parameters: Name Type Description Default tm the input numpy datetime object required Returns: Type Description datetime.datetime python datetime object .. deprecated:: 2.0.0 use tm.item() instead Source code in omicron/extensions/np.py @deprecated ( \"2.0.0\" , details = \"use `tm.item()` instead\" ) def to_pydatetime ( tm : np . datetime64 ) -> datetime . datetime : \"\"\"\u5c06numpy.datetime64\u5bf9\u8c61\u8f6c\u6362\u6210\u4e3apython\u7684datetime\u5bf9\u8c61 numpy.ndarray.item()\u65b9\u6cd5\u53ef\u7528\u4ee5\u5c06\u4efb\u4f55numpy\u5bf9\u8c61\u8f6c\u6362\u6210python\u5bf9\u8c61\uff0c\u63a8\u8350\u5728\u4efb\u4f55\u9002\u7528\u7684\u5730\u65b9\u4f7f\u7528.item()\u65b9\u6cd5\uff0c\u800c\u4e0d\u662f\u672c\u65b9\u6cd5\u3002\u793a\u4f8b: ``` arr = np.array(['2022-09-08', '2022-09-09'], dtype='datetime64[s]') arr.item(0) # output is datetime.datetime(2022, 9, 8, 0, 0) arr[1].item() # output is datetime.datetime(2022, 9, 9, 0, 0) ``` Args: tm : the input numpy datetime object Returns: python datetime object \"\"\" unix_epoch = np . datetime64 ( 0 , \"s\" ) one_second = np . timedelta64 ( 1 , \"s\" ) seconds_since_epoch = ( tm - unix_epoch ) / one_second return datetime . datetime . utcfromtimestamp ( seconds_since_epoch ) top_n_argpos ( ts , n ) \u00b6 get top n (max->min) elements and return argpos which its value ordered in descent Examples: >>> top_n_argpos ([ np . nan , 4 , 3 , 9 , 8 , 5 , 2 , 1 , 0 , 6 , 7 ], 2 ) array ([ 3 , 4 ]) Parameters: Name Type Description Default ts np.array [description] required n int [description] required Returns: Type Description np.array [description] Source code in omicron/extensions/np.py def top_n_argpos ( ts : np . array , n : int ) -> np . array : \"\"\"get top n (max->min) elements and return argpos which its value ordered in descent Example: >>> top_n_argpos([np.nan, 4, 3, 9, 8, 5, 2, 1, 0, 6, 7], 2) array([3, 4]) Args: ts (np.array): [description] n (int): [description] Returns: np.array: [description] \"\"\" ts_ = np . copy ( ts ) ts_ [ np . isnan ( ts_ )] = - np . inf return np . 
argsort ( ts_ )[ - n :][:: - 1 ]","title":"Extensions"},{"location":"api/extensions/#omicron.extensions.decimals","text":"","title":"decimals"},{"location":"api/extensions/#omicron.extensions.decimals.math_round","text":"\u7531\u4e8e\u6d6e\u70b9\u6570\u7684\u8868\u793a\u95ee\u9898\uff0c\u5f88\u591a\u8bed\u8a00\u7684round\u51fd\u6570\u4e0e\u6570\u5b66\u4e0a\u7684round\u51fd\u6570\u4e0d\u4e00\u81f4\u3002\u4e0b\u9762\u7684\u51fd\u6570\u7ed3\u679c\u4e0e\u6570\u5b66\u4e0a\u7684\u4e00\u81f4\u3002 Parameters: Name Type Description Default x float \u8981\u8fdb\u884c\u56db\u820d\u4e94\u5165\u7684\u6570\u5b57 required digits int \u5c0f\u6570\u70b9\u540e\u4fdd\u7559\u7684\u4f4d\u6570 required Source code in omicron/extensions/decimals.py def math_round ( x : float , digits : int ): \"\"\"\u7531\u4e8e\u6d6e\u70b9\u6570\u7684\u8868\u793a\u95ee\u9898\uff0c\u5f88\u591a\u8bed\u8a00\u7684round\u51fd\u6570\u4e0e\u6570\u5b66\u4e0a\u7684round\u51fd\u6570\u4e0d\u4e00\u81f4\u3002\u4e0b\u9762\u7684\u51fd\u6570\u7ed3\u679c\u4e0e\u6570\u5b66\u4e0a\u7684\u4e00\u81f4\u3002 Args: x: \u8981\u8fdb\u884c\u56db\u820d\u4e94\u5165\u7684\u6570\u5b57 digits: \u5c0f\u6570\u70b9\u540e\u4fdd\u7559\u7684\u4f4d\u6570 \"\"\" return int ( x * ( 10 ** digits ) + copysign ( 0.5 , x )) / ( 10 ** digits )","title":"math_round()"},{"location":"api/extensions/#omicron.extensions.decimals.price_equal","text":"\u5224\u65ad\u80a1\u4ef7\u662f\u5426\u76f8\u7b49 Parameters: Name Type Description Default x \u4ef7\u683c1 required y \u4ef7\u683c2 required Returns: Type Description bool \u5982\u679c\u76f8\u7b49\u5219\u8fd4\u56deTrue\uff0c\u5426\u5219\u8fd4\u56deFalse Source code in omicron/extensions/decimals.py def price_equal ( x : float , y : float ) -> bool : \"\"\"\u5224\u65ad\u80a1\u4ef7\u662f\u5426\u76f8\u7b49 Args: x : \u4ef7\u683c1 y : \u4ef7\u683c2 Returns: \u5982\u679c\u76f8\u7b49\u5219\u8fd4\u56deTrue\uff0c\u5426\u5219\u8fd4\u56deFalse \"\"\" return abs ( math_round ( x , 2 ) - math_round ( y , 2 )) < 1e-2","title":"price_equal()"},{"location":"api/extensions/#omicron.extensions.np","text":"Extension function related to numpy","title":"np"},{"location":"api/extensions/#omicron.extensions.np.array_math_round","text":"\u5c06\u4e00\u7ef4\u6570\u7ec4arr\u7684\u6570\u636e\u8fdb\u884c\u56db\u820d\u4e94\u5165 numpy.around\u7684\u51fd\u6570\u5e76\u4e0d\u662f\u6570\u5b66\u4e0a\u7684\u56db\u820d\u4e94\u5165\uff0c\u5bf91.5\u548c2.5\u8fdb\u884cround\u7684\u7ed3\u679c\u90fd\u4f1a\u53d8\u62102\uff0c\u5728\u91d1\u878d\u9886\u57df\u8ba1\u7b97\u4e2d\uff0c\u6211\u4eec\u5fc5\u987b\u4f7f\u7528\u6570\u5b66\u610f\u4e49\u4e0a\u7684\u56db\u820d\u4e94\u5165\u3002 Parameters: Name Type Description Default arr ArrayLike \u8f93\u5165\u6570\u7ec4 required digits int required Returns: Type Description np.ndarray \u56db\u820d\u4e94\u5165\u540e\u7684\u4e00\u7ef4\u6570\u7ec4 Source code in omicron/extensions/np.py def array_math_round ( arr : Union [ float , ArrayLike ], digits : int ) -> np . 
ndarray : \"\"\"\u5c06\u4e00\u7ef4\u6570\u7ec4arr\u7684\u6570\u636e\u8fdb\u884c\u56db\u820d\u4e94\u5165 numpy.around\u7684\u51fd\u6570\u5e76\u4e0d\u662f\u6570\u5b66\u4e0a\u7684\u56db\u820d\u4e94\u5165\uff0c\u5bf91.5\u548c2.5\u8fdb\u884cround\u7684\u7ed3\u679c\u90fd\u4f1a\u53d8\u62102\uff0c\u5728\u91d1\u878d\u9886\u57df\u8ba1\u7b97\u4e2d\uff0c\u6211\u4eec\u5fc5\u987b\u4f7f\u7528\u6570\u5b66\u610f\u4e49\u4e0a\u7684\u56db\u820d\u4e94\u5165\u3002 Args: arr (ArrayLike): \u8f93\u5165\u6570\u7ec4 digits (int): Returns: np.ndarray: \u56db\u820d\u4e94\u5165\u540e\u7684\u4e00\u7ef4\u6570\u7ec4 \"\"\" # \u5982\u679c\u662f\u5355\u4e2a\u5143\u7d20\uff0c\u5219\u76f4\u63a5\u8fd4\u56de if isinstance ( arr , float ): return decimals . math_round ( arr , digits ) f = np . vectorize ( lambda x : decimals . math_round ( x , digits )) return f ( arr )","title":"array_math_round()"},{"location":"api/extensions/#omicron.extensions.np.array_price_equal","text":"\u5224\u65ad\u4e24\u4e2a\u4ef7\u683c\u6570\u7ec4\u662f\u5426\u76f8\u7b49 Parameters: Name Type Description Default price1 ArrayLike \u4ef7\u683c\u6570\u7ec4 required price2 ArrayLike \u4ef7\u683c\u6570\u7ec4 required Returns: Type Description np.ndarray \u5224\u65ad\u7ed3\u679c Source code in omicron/extensions/np.py def array_price_equal ( price1 : ArrayLike , price2 : ArrayLike ) -> np . ndarray : \"\"\"\u5224\u65ad\u4e24\u4e2a\u4ef7\u683c\u6570\u7ec4\u662f\u5426\u76f8\u7b49 Args: price1 (ArrayLike): \u4ef7\u683c\u6570\u7ec4 price2 (ArrayLike): \u4ef7\u683c\u6570\u7ec4 Returns: np.ndarray: \u5224\u65ad\u7ed3\u679c \"\"\" price1 = array_math_round ( price1 , 2 ) price2 = array_math_round ( price2 , 2 ) return abs ( price1 - price2 ) < 1e-2","title":"array_price_equal()"},{"location":"api/extensions/#omicron.extensions.np.bars_since","text":"Return the number of bars since condition sequence was last True , or if never, return default . 1 2 3 >>> condition = [True, True, False] >>> bars_since(condition) 1 Source code in omicron/extensions/np.py def bars_since ( condition : Sequence [ bool ], default = None ) -> int : \"\"\" Return the number of bars since `condition` sequence was last `True`, or if never, return `default`. >>> condition = [True, True, False] >>> bars_since(condition) 1 \"\"\" return next ( compress ( range ( len ( condition )), reversed ( condition )), default )","title":"bars_since()"},{"location":"api/extensions/#omicron.extensions.np.bin_cut","text":"\u5c06\u6570\u7ec4arr\u5207\u5206\u6210n\u4efd todo: use padding + reshape to boost performance Parameters: Name Type Description Default arr [type] [description] required n [type] [description] required Returns: Type Description [type] [description] Source code in omicron/extensions/np.py def bin_cut ( arr : list , n : int ): \"\"\"\u5c06\u6570\u7ec4arr\u5207\u5206\u6210n\u4efd todo: use padding + reshape to boost performance Args: arr ([type]): [description] n ([type]): [description] Returns: [type]: [description] \"\"\" result = [[] for i in range ( n )] for i , e in enumerate ( arr ): result [ i % n ] . 
append ( e ) return [ e for e in result if len ( e )]","title":"bin_cut()"},{"location":"api/extensions/#omicron.extensions.np.count_between","text":"\u8ba1\u7b97\u6570\u7ec4\u4e2d\uff0c start \u5143\u7d20\u4e0e end \u5143\u7d20\u4e4b\u95f4\u5171\u6709\u591a\u5c11\u4e2a\u5143\u7d20 \u8981\u6c42arr\u5fc5\u987b\u662f\u5df2\u6392\u5e8f\u3002\u8ba1\u7b97\u7ed3\u679c\u4f1a\u5305\u542b\u533a\u95f4\u8fb9\u754c\u70b9\u3002 Examples: >>> arr = [ 20050104 , 20050105 , 20050106 , 20050107 , 20050110 , 20050111 ] >>> count_between ( arr , 20050104 , 20050111 ) 6 >>> count_between ( arr , 20050104 , 20050109 ) 4 Source code in omicron/extensions/np.py def count_between ( arr , start , end ): \"\"\"\u8ba1\u7b97\u6570\u7ec4\u4e2d\uff0c`start`\u5143\u7d20\u4e0e`end`\u5143\u7d20\u4e4b\u95f4\u5171\u6709\u591a\u5c11\u4e2a\u5143\u7d20 \u8981\u6c42arr\u5fc5\u987b\u662f\u5df2\u6392\u5e8f\u3002\u8ba1\u7b97\u7ed3\u679c\u4f1a\u5305\u542b\u533a\u95f4\u8fb9\u754c\u70b9\u3002 Examples: >>> arr = [20050104, 20050105, 20050106, 20050107, 20050110, 20050111] >>> count_between(arr, 20050104, 20050111) 6 >>> count_between(arr, 20050104, 20050109) 4 \"\"\" pos_start = np . searchsorted ( arr , start , side = \"right\" ) pos_end = np . searchsorted ( arr , end , side = \"right\" ) counter = pos_end - pos_start + 1 if start < arr [ 0 ]: counter -= 1 if end > arr [ - 1 ]: counter -= 1 return counter","title":"count_between()"},{"location":"api/extensions/#omicron.extensions.np.dataframe_to_structured_array","text":"convert dataframe (with all columns, and index possibly) to numpy structured arrays len(dtypes) should be either equal to len(df.columns) or len(df.columns) + 1 . In the later case, it implies to include df.index into converted array. Parameters: Name Type Description Default df DataFrame the one needs to be converted required dtypes List[Tuple] Defaults to None. If it's None , then dtypes of df is used, in such case, the index of df will not be converted. None Returns: Type Description ArrayLike [description] Source code in omicron/extensions/np.py def dataframe_to_structured_array ( df : DataFrame , dtypes : List [ Tuple ] = None ) -> ArrayLike : \"\"\"convert dataframe (with all columns, and index possibly) to numpy structured arrays `len(dtypes)` should be either equal to `len(df.columns)` or `len(df.columns) + 1`. In the later case, it implies to include `df.index` into converted array. Args: df: the one needs to be converted dtypes: Defaults to None. If it's `None`, then dtypes of `df` is used, in such case, the `index` of `df` will not be converted. Returns: ArrayLike: [description] \"\"\" v = df if dtypes is not None : dtypes_in_dict = { key : value for key , value in dtypes } col_len = len ( df . columns ) if len ( dtypes ) == col_len + 1 : v = df . reset_index () rename_index_to = set ( dtypes_in_dict . keys ()) . difference ( set ( df . columns )) v . rename ( columns = { \"index\" : list ( rename_index_to )[ 0 ]}, inplace = True ) elif col_len != len ( dtypes ): raise ValueError ( f \"length of dtypes should be either { col_len } or { col_len + 1 } , is { len ( dtypes ) } \" ) # re-arrange order of dtypes, in order to align with df.columns dtypes = [] for name in v . columns : dtypes . append (( name , dtypes_in_dict [ name ])) else : dtypes = df . dtypes return np . array ( np . rec . fromrecords ( v . 
values ), dtype = dtypes )","title":"dataframe_to_structured_array()"},{"location":"api/extensions/#omicron.extensions.np.dict_to_numpy_array","text":"convert dictionary to numpy array Examples: d = {\"aaron\": 5, \"jack\": 6} dtype = [(\"name\", \"S8\"), (\"score\", \" np . array : \"\"\"convert dictionary to numpy array Examples: >>> d = {\"aaron\": 5, \"jack\": 6} >>> dtype = [(\"name\", \"S8\"), (\"score\", \">> dict_to_numpy_array(d, dtype) array([(b'aaron', 5), (b'jack', 6)], dtype=[('name', 'S8'), ('score', '>> arr = np . arange ( 6 , dtype = np . float32 ) >>> arr [ 3 : 5 ] = np . NaN >>> fill_nan ( arr ) ... array ([ 0. , 1. , 2. , 2. , 2. , 5. ], dtype = float32 ) >>> arr = np . arange ( 6 , dtype = np . float32 ) >>> arr [ 0 : 2 ] = np . nan >>> fill_nan ( arr ) ... array ([ 2. , 2. , 2. , 3. , 4. , 5. ], dtype = float32 ) Parameters: Name Type Description Default ts np.array [description] required Source code in omicron/extensions/np.py def fill_nan ( ts : np . ndarray ): \"\"\"\u5c06ts\u4e2d\u7684NaN\u66ff\u6362\u4e3a\u5176\u524d\u503c \u5982\u679cts\u8d77\u5934\u7684\u5143\u7d20\u4e3aNaN\uff0c\u5219\u7528\u7b2c\u4e00\u4e2a\u975eNaN\u5143\u7d20\u66ff\u6362\u3002 \u5982\u679c\u6240\u6709\u5143\u7d20\u90fd\u4e3aNaN\uff0c\u5219\u65e0\u6cd5\u66ff\u6362\u3002 Example: >>> arr = np.arange(6, dtype=np.float32) >>> arr[3:5] = np.NaN >>> fill_nan(arr) ... # doctest: +NORMALIZE_WHITESPACE array([0., 1., 2., 2., 2., 5.], dtype=float32) >>> arr = np.arange(6, dtype=np.float32) >>> arr[0:2] = np.nan >>> fill_nan(arr) ... # doctest: +NORMALIZE_WHITESPACE array([2., 2., 2., 3., 4., 5.], dtype=float32) Args: ts (np.array): [description] \"\"\" if np . all ( np . isnan ( ts )): raise ValueError ( \"all of ts are NaN\" ) if ts [ 0 ] is None or math . isnan ( ts [ 0 ]): idx = np . argwhere ( ~ np . isnan ( ts ))[ 0 ] ts [ 0 ] = ts [ idx ] mask = np . isnan ( ts ) idx = np . where ( ~ mask , np . arange ( mask . size ), 0 ) np . maximum . accumulate ( idx , out = idx ) return ts [ idx ]","title":"fill_nan()"},{"location":"api/extensions/#omicron.extensions.np.find_runs","text":"Find runs of consecutive items in an array. Parameters: Name Type Description Default x ArrayLike the sequence to find runs in required Returns: Type Description Tuple[np.ndarray, np.ndarray, np.ndarray] A tuple of unique values, start indices, and length of runs Source code in omicron/extensions/np.py def find_runs ( x : ArrayLike ) -> Tuple [ np . ndarray , np . ndarray , np . ndarray ]: \"\"\"Find runs of consecutive items in an array. Args: x: the sequence to find runs in Returns: A tuple of unique values, start indices, and length of runs \"\"\" # ensure array x = np . asanyarray ( x ) if x . ndim != 1 : raise ValueError ( \"only 1D array supported\" ) n = x . shape [ 0 ] # handle empty array if n == 0 : return np . array ([]), np . array ([]), np . array ([]) else : # find run starts loc_run_start = np . empty ( n , dtype = bool ) loc_run_start [ 0 ] = True np . not_equal ( x [: - 1 ], x [ 1 :], out = loc_run_start [ 1 :]) run_starts = np . nonzero ( loc_run_start )[ 0 ] # find run values run_values = x [ loc_run_start ] # find run lengths run_lengths = np . diff ( np . 
append ( run_starts , n )) return run_values , run_starts , run_lengths","title":"find_runs()"},{"location":"api/extensions/#omicron.extensions.np.floor","text":"\u5728\u6570\u636earr\u4e2d\uff0c\u627e\u5230\u5c0f\u4e8e\u7b49\u4e8eitem\u7684\u90a3\u4e00\u4e2a\u503c\u3002\u5982\u679citem\u5c0f\u4e8e\u6240\u6709arr\u5143\u7d20\u7684\u503c\uff0c\u8fd4\u56dearr[0];\u5982\u679citem \u5927\u4e8e\u6240\u6709arr\u5143\u7d20\u7684\u503c\uff0c\u8fd4\u56dearr[-1] \u4e0e minute_frames_floor \u4e0d\u540c\u7684\u662f\uff0c\u672c\u51fd\u6570\u4e0d\u505a\u56de\u7ed5\u4e0e\u8fdb\u4f4d. Examples: >>> a = [ 3 , 6 , 9 ] >>> floor ( a , - 1 ) 3 >>> floor ( a , 9 ) 9 >>> floor ( a , 10 ) 9 >>> floor ( a , 4 ) 3 >>> floor ( a , 10 ) 9 Parameters: Name Type Description Default arr required item required Source code in omicron/extensions/np.py def floor ( arr , item ): \"\"\" \u5728\u6570\u636earr\u4e2d\uff0c\u627e\u5230\u5c0f\u4e8e\u7b49\u4e8eitem\u7684\u90a3\u4e00\u4e2a\u503c\u3002\u5982\u679citem\u5c0f\u4e8e\u6240\u6709arr\u5143\u7d20\u7684\u503c\uff0c\u8fd4\u56dearr[0];\u5982\u679citem \u5927\u4e8e\u6240\u6709arr\u5143\u7d20\u7684\u503c\uff0c\u8fd4\u56dearr[-1] \u4e0e`minute_frames_floor`\u4e0d\u540c\u7684\u662f\uff0c\u672c\u51fd\u6570\u4e0d\u505a\u56de\u7ed5\u4e0e\u8fdb\u4f4d. Examples: >>> a = [3, 6, 9] >>> floor(a, -1) 3 >>> floor(a, 9) 9 >>> floor(a, 10) 9 >>> floor(a, 4) 3 >>> floor(a,10) 9 Args: arr: item: Returns: \"\"\" if item < arr [ 0 ]: return arr [ 0 ] index = np . searchsorted ( arr , item , side = \"right\" ) return arr [ index - 1 ]","title":"floor()"},{"location":"api/extensions/#omicron.extensions.np.join_by_left","text":"\u5de6\u8fde\u63a5 r1 , r2 by key \u5982\u679c r1 \u4e2d\u5b58\u5728 r2 \u4e2d\u6ca1\u6709\u7684\u884c\uff0c\u5219\u8be5\u884c\u5bf9\u5e94\u7684 r2 \u4e2d\u7684\u90a3\u4e9b\u5b57\u6bb5\u5c06\u88abmask\uff0c\u6216\u8005\u586b\u5145\u968f\u673a\u6570\u3002 same as numpy.lib.recfunctions.join_by(key, r1, r2, jointype='leftouter'), but allows r1 have duplicate keys Reference: stackoverflow Examples: >>> # to join the following >>> # [[ 1, 2], >>> # [ 1, 3], x [[1, 5], >>> # [ 2, 3]] [4, 7]] >>> # only first two rows in left will be joined >>> r1 = np . array ([( 1 , 2 ), ( 1 , 3 ), ( 2 , 3 )], dtype = [( 'seq' , 'i4' ), ( 'score' , 'i4' )]) >>> r2 = np . array ([( 1 , 5 ), ( 4 , 7 )], dtype = [( 'seq' , 'i4' ), ( 'age' , 'i4' )]) >>> joined = join_by_left ( 'seq' , r1 , r2 ) >>> print ( joined ) [( 1 , 2 , 5 ) ( 1 , 3 , 5 ) ( 2 , 3 , -- )] >>> print ( joined . dtype ) ( numpy . record , [( 'seq' , '>> joined [ 2 ][ 2 ] masked >>> joined . 
tolist ()[ 2 ][ 2 ] == None True Parameters: Name Type Description Default key join\u5173\u952e\u5b57 required r1 \u6570\u636e\u96c61 required r2 \u6570\u636e\u96c62 required Returns: Type Description a numpy array Source code in omicron/extensions/np.py def join_by_left ( key , r1 , r2 , mask = True ): \"\"\"\u5de6\u8fde\u63a5 `r1`, `r2` by `key` \u5982\u679c`r1`\u4e2d\u5b58\u5728`r2`\u4e2d\u6ca1\u6709\u7684\u884c\uff0c\u5219\u8be5\u884c\u5bf9\u5e94\u7684`r2`\u4e2d\u7684\u90a3\u4e9b\u5b57\u6bb5\u5c06\u88abmask\uff0c\u6216\u8005\u586b\u5145\u968f\u673a\u6570\u3002 same as numpy.lib.recfunctions.join_by(key, r1, r2, jointype='leftouter'), but allows r1 have duplicate keys [Reference: stackoverflow](https://stackoverflow.com/a/53261882/13395693) Examples: >>> # to join the following >>> # [[ 1, 2], >>> # [ 1, 3], x [[1, 5], >>> # [ 2, 3]] [4, 7]] >>> # only first two rows in left will be joined >>> r1 = np.array([(1, 2), (1,3), (2,3)], dtype=[('seq', 'i4'), ('score', 'i4')]) >>> r2 = np.array([(1, 5), (4,7)], dtype=[('seq', 'i4'), ('age', 'i4')]) >>> joined = join_by_left('seq', r1, r2) >>> print(joined) [(1, 2, 5) (1, 3, 5) (2, 3, --)] >>> print(joined.dtype) (numpy.record, [('seq', '>> joined[2][2] masked >>> joined.tolist()[2][2] == None True Args: key : join\u5173\u952e\u5b57 r1 : \u6570\u636e\u96c61 r2 : \u6570\u636e\u96c62 Returns: a numpy array \"\"\" # figure out the dtype of the result array descr1 = r1 . dtype . descr descr2 = [ d for d in r2 . dtype . descr if d [ 0 ] not in r1 . dtype . names ] descrm = descr1 + descr2 # figure out the fields we'll need from each array f1 = [ d [ 0 ] for d in descr1 ] f2 = [ d [ 0 ] for d in descr2 ] # cache the number of columns in f1 ncol1 = len ( f1 ) # get a dict of the rows of r2 grouped by key rows2 = {} for row2 in r2 : rows2 . setdefault ( row2 [ key ], []) . append ( row2 ) # figure out how many rows will be in the result nrowm = 0 for k1 in r1 [ key ]: if k1 in rows2 : nrowm += len ( rows2 [ k1 ]) else : nrowm += 1 # allocate the return array # ret = np.full((nrowm, ), fill, dtype=descrm) _ret = np . recarray ( nrowm , dtype = descrm ) if mask : ret = np . ma . array ( _ret , mask = True ) else : ret = _ret # merge the data into the return array i = 0 for row1 in r1 : if row1 [ key ] in rows2 : for row2 in rows2 [ row1 [ key ]]: ret [ i ] = tuple ( row1 [ f1 ]) + tuple ( row2 [ f2 ]) i += 1 else : for j in range ( ncol1 ): ret [ i ][ j ] = row1 [ j ] i += 1 return ret","title":"join_by_left()"},{"location":"api/extensions/#omicron.extensions.np.numpy_append_fields","text":"\u7ed9\u73b0\u6709\u7684\u6570\u7ec4 base \u589e\u52a0\u65b0\u7684\u5b57\u6bb5 \u5b9e\u73b0\u4e86 numpy.lib.recfunctions.rec_append_fields \u7684\u529f\u80fd\u3002\u63d0\u4f9b\u8fd9\u4e2a\u529f\u80fd\uff0c\u662f\u56e0\u4e3a rec_append_fields \u4e0d\u80fd\u5904\u7406 data \u5143\u7d20\u7684\u7c7b\u578b\u4e3aObject\u7684\u60c5\u51b5\u3002 \u65b0\u589e\u7684\u6570\u636e\u5217\u5c06\u987a\u5e8f\u6392\u5217\u5728\u5176\u5b83\u5217\u7684\u53f3\u8fb9\u3002 Examples: >>> # \u65b0\u589e\u5355\u4e2a\u5b57\u6bb5 >>> import numpy >>> old = np . array ([ i for i in range ( 3 )], dtype = [( 'col1' , '>> new_list = [ 2 * i for i in range ( 3 )] >>> res = numpy_append_fields ( old , 'new_col' , new_list , [( 'new_col' , '>> print ( res ) ... [( 0. , 0. ) ( 1. , 2. ) ( 2. , 4. )] >>> # \u65b0\u589e\u591a\u4e2a\u5b57\u6bb5 >>> data = [ res [ 'col1' ] . tolist (), res [ 'new_col' ] . tolist ()] >>> print ( numpy_append_fields ( old , ( 'col3' , 'col4' ), data , [( 'col3' , ' np . 
ndarray : \"\"\"\u7ed9\u73b0\u6709\u7684\u6570\u7ec4`base`\u589e\u52a0\u65b0\u7684\u5b57\u6bb5 \u5b9e\u73b0\u4e86`numpy.lib.recfunctions.rec_append_fields`\u7684\u529f\u80fd\u3002\u63d0\u4f9b\u8fd9\u4e2a\u529f\u80fd\uff0c\u662f\u56e0\u4e3a`rec_append_fields`\u4e0d\u80fd\u5904\u7406`data`\u5143\u7d20\u7684\u7c7b\u578b\u4e3aObject\u7684\u60c5\u51b5\u3002 \u65b0\u589e\u7684\u6570\u636e\u5217\u5c06\u987a\u5e8f\u6392\u5217\u5728\u5176\u5b83\u5217\u7684\u53f3\u8fb9\u3002 Example: >>> # \u65b0\u589e\u5355\u4e2a\u5b57\u6bb5 >>> import numpy >>> old = np.array([i for i in range(3)], dtype=[('col1', '>> new_list = [2 * i for i in range(3)] >>> res = numpy_append_fields(old, 'new_col', new_list, [('new_col', '>> print(res) ... # doctest: +NORMALIZE_WHITESPACE [(0., 0.) (1., 2.) (2., 4.)] >>> # \u65b0\u589e\u591a\u4e2a\u5b57\u6bb5 >>> data = [res['col1'].tolist(), res['new_col'].tolist()] >>> print(numpy_append_fields(old, ('col3', 'col4'), data, [('col3', ' np . ndarray : \"\"\"\u4ece`ts`\u4e2d\u53bb\u9664NaN Args: ts (np.array): [description] Returns: np.array: [description] \"\"\" return ts [ ~ np . isnan ( ts . astype ( float ))]","title":"remove_nan()"},{"location":"api/extensions/#omicron.extensions.np.replace_zero","text":"\u5c06ts\u4e2d\u76840\u66ff\u6362\u4e3a\u524d\u503c, \u5904\u7406volume\u6570\u636e\u65f6\u5e38\u7528\u7528\u5230 \u5982\u679c\u63d0\u4f9b\u4e86replacement, \u5219\u66ff\u6362\u4e3areplacement Source code in omicron/extensions/np.py def replace_zero ( ts : np . ndarray , replacement = None ) -> np . ndarray : \"\"\"\u5c06ts\u4e2d\u76840\u66ff\u6362\u4e3a\u524d\u503c, \u5904\u7406volume\u6570\u636e\u65f6\u5e38\u7528\u7528\u5230 \u5982\u679c\u63d0\u4f9b\u4e86replacement, \u5219\u66ff\u6362\u4e3areplacement \"\"\" if replacement is not None : return np . where ( ts == 0 , replacement , ts ) if np . all ( ts == 0 ): raise ValueError ( \"all of ts are 0\" ) if ts [ 0 ] == 0 : idx = np . argwhere ( ts != 0 )[ 0 ] ts [ 0 ] = ts [ idx ] mask = ts == 0 idx = np . where ( ~ mask , np . arange ( mask . size ), 0 ) np . maximum . accumulate ( idx , out = idx ) return ts [ idx ]","title":"replace_zero()"},{"location":"api/extensions/#omicron.extensions.np.rolling","text":"\u5bf9\u5e8f\u5217 x \u8fdb\u884c\u7a97\u53e3\u6ed1\u52a8\u8ba1\u7b97\u3002 \u5982\u679c func \u8981\u5b9e\u73b0\u7684\u529f\u80fd\u662fargmax, argmin, max, mean, median, min, rank, std, sum, var\u7b49\uff0cmove_argmax\uff0c\u8bf7\u4f7f\u7528bottleneck\u4e2d\u7684move_argmin, move_max, move_mean, move_median, move_min move_rank, move_std, move_sum, move_var\u3002\u8fd9\u4e9b\u51fd\u6570\u7684\u6027\u80fd\u66f4\u597d\u3002 Parameters: Name Type Description Default x [type] [description] required win [type] [description] required func [type] [description] required Returns: Type Description [type] [description] Source code in omicron/extensions/np.py def rolling ( x , win , func ): \"\"\"\u5bf9\u5e8f\u5217`x`\u8fdb\u884c\u7a97\u53e3\u6ed1\u52a8\u8ba1\u7b97\u3002 \u5982\u679c`func`\u8981\u5b9e\u73b0\u7684\u529f\u80fd\u662fargmax, argmin, max, mean, median, min, rank, std, sum, var\u7b49\uff0cmove_argmax\uff0c\u8bf7\u4f7f\u7528bottleneck\u4e2d\u7684move_argmin, move_max, move_mean, move_median, move_min move_rank, move_std, move_sum, move_var\u3002\u8fd9\u4e9b\u51fd\u6570\u7684\u6027\u80fd\u66f4\u597d\u3002 Args: x ([type]): [description] win ([type]): [description] func ([type]): [description] Returns: [type]: [description] \"\"\" results = [] for subarray in sliding_window_view ( x , window_shape = win ): results . 
append ( func ( subarray )) return np . array ( results )","title":"rolling()"},{"location":"api/extensions/#omicron.extensions.np.shift","text":"\u5728numpy\u6570\u7ec4arr\u4e2d\uff0c\u627e\u5230start(\u6216\u8005\u6700\u63a5\u8fd1\u7684\u4e00\u4e2a\uff09\uff0c\u53d6offset\u5bf9\u5e94\u7684\u5143\u7d20\u3002 \u8981\u6c42 arr \u5df2\u6392\u5e8f\u3002 offset \u4e3a\u6b63\uff0c\u8868\u660e\u5411\u540e\u79fb\u4f4d\uff1b offset \u4e3a\u8d1f\uff0c\u8868\u660e\u5411\u524d\u79fb\u4f4d Examples: >>> arr = [ 20050104 , 20050105 , 20050106 , 20050107 , 20050110 , 20050111 ] >>> shift ( arr , 20050104 , 1 ) 20050105 >>> shift ( arr , 20050105 , - 1 ) 20050104 >>> # \u8d77\u59cb\u70b9\u5df2\u53f3\u8d8a\u754c\uff0c\u4e14\u5411\u53f3shift\uff0c\u8fd4\u56de\u8d77\u59cb\u70b9 >>> shift ( arr , 20050120 , 1 ) 20050120 Parameters: Name Type Description Default arr \u5df2\u6392\u5e8f\u7684\u6570\u7ec4 required start numpy\u53ef\u63a5\u53d7\u7684\u6570\u636e\u7c7b\u578b required offset int [description] required Returns: Type Description \u79fb\u4f4d\u540e\u5f97\u5230\u7684\u5143\u7d20\u503c Source code in omicron/extensions/np.py def shift ( arr , start , offset ): \"\"\"\u5728numpy\u6570\u7ec4arr\u4e2d\uff0c\u627e\u5230start(\u6216\u8005\u6700\u63a5\u8fd1\u7684\u4e00\u4e2a\uff09\uff0c\u53d6offset\u5bf9\u5e94\u7684\u5143\u7d20\u3002 \u8981\u6c42`arr`\u5df2\u6392\u5e8f\u3002`offset`\u4e3a\u6b63\uff0c\u8868\u660e\u5411\u540e\u79fb\u4f4d\uff1b`offset`\u4e3a\u8d1f\uff0c\u8868\u660e\u5411\u524d\u79fb\u4f4d Examples: >>> arr = [20050104, 20050105, 20050106, 20050107, 20050110, 20050111] >>> shift(arr, 20050104, 1) 20050105 >>> shift(arr, 20050105, -1) 20050104 >>> # \u8d77\u59cb\u70b9\u5df2\u53f3\u8d8a\u754c\uff0c\u4e14\u5411\u53f3shift\uff0c\u8fd4\u56de\u8d77\u59cb\u70b9 >>> shift(arr, 20050120, 1) 20050120 Args: arr : \u5df2\u6392\u5e8f\u7684\u6570\u7ec4 start : numpy\u53ef\u63a5\u53d7\u7684\u6570\u636e\u7c7b\u578b offset (int): [description] Returns: \u79fb\u4f4d\u540e\u5f97\u5230\u7684\u5143\u7d20\u503c \"\"\" pos = np . searchsorted ( arr , start , side = \"right\" ) if pos + offset - 1 >= len ( arr ): return start else : return arr [ pos + offset - 1 ]","title":"shift()"},{"location":"api/extensions/#omicron.extensions.np.smallest_n_argpos","text":"get smallest n (min->max) elements and return argpos which its value ordered in ascent Examples: >>> smallest_n_argpos ([ np . nan , 4 , 3 , 9 , 8 , 5 , 2 , 1 , 0 , 6 , 7 ], 2 ) array ([ 8 , 7 ]) Parameters: Name Type Description Default ts np.array \u8f93\u5165\u7684\u6570\u7ec4 required n int \u53d6\u6700\u5c0f\u7684n\u4e2a\u5143\u7d20 required Returns: Type Description np.array [description] Source code in omicron/extensions/np.py def smallest_n_argpos ( ts : np . array , n : int ) -> np . array : \"\"\"get smallest n (min->max) elements and return argpos which its value ordered in ascent Example: >>> smallest_n_argpos([np.nan, 4, 3, 9, 8, 5, 2, 1, 0, 6, 7], 2) array([8, 7]) Args: ts (np.array): \u8f93\u5165\u7684\u6570\u7ec4 n (int): \u53d6\u6700\u5c0f\u7684n\u4e2a\u5143\u7d20 Returns: np.array: [description] \"\"\" return np . 
argsort ( ts )[: n ]","title":"smallest_n_argpos()"},{"location":"api/extensions/#omicron.extensions.np.to_pydatetime","text":"\u5c06numpy.datetime64\u5bf9\u8c61\u8f6c\u6362\u6210\u4e3apython\u7684datetime\u5bf9\u8c61 numpy.ndarray.item()\u65b9\u6cd5\u53ef\u7528\u4ee5\u5c06\u4efb\u4f55numpy\u5bf9\u8c61\u8f6c\u6362\u6210python\u5bf9\u8c61\uff0c\u63a8\u8350\u5728\u4efb\u4f55\u9002\u7528\u7684\u5730\u65b9\u4f7f\u7528.item()\u65b9\u6cd5\uff0c\u800c\u4e0d\u662f\u672c\u65b9\u6cd5\u3002\u793a\u4f8b: 1 2 3 4 arr = np.array(['2022-09-08', '2022-09-09'], dtype='datetime64[s]') arr.item(0) # output is datetime.datetime(2022, 9, 8, 0, 0) arr[1].item() # output is datetime.datetime(2022, 9, 9, 0, 0) Parameters: Name Type Description Default tm the input numpy datetime object required Returns: Type Description datetime.datetime python datetime object .. deprecated:: 2.0.0 use tm.item() instead Source code in omicron/extensions/np.py @deprecated ( \"2.0.0\" , details = \"use `tm.item()` instead\" ) def to_pydatetime ( tm : np . datetime64 ) -> datetime . datetime : \"\"\"\u5c06numpy.datetime64\u5bf9\u8c61\u8f6c\u6362\u6210\u4e3apython\u7684datetime\u5bf9\u8c61 numpy.ndarray.item()\u65b9\u6cd5\u53ef\u7528\u4ee5\u5c06\u4efb\u4f55numpy\u5bf9\u8c61\u8f6c\u6362\u6210python\u5bf9\u8c61\uff0c\u63a8\u8350\u5728\u4efb\u4f55\u9002\u7528\u7684\u5730\u65b9\u4f7f\u7528.item()\u65b9\u6cd5\uff0c\u800c\u4e0d\u662f\u672c\u65b9\u6cd5\u3002\u793a\u4f8b: ``` arr = np.array(['2022-09-08', '2022-09-09'], dtype='datetime64[s]') arr.item(0) # output is datetime.datetime(2022, 9, 8, 0, 0) arr[1].item() # output is datetime.datetime(2022, 9, 9, 0, 0) ``` Args: tm : the input numpy datetime object Returns: python datetime object \"\"\" unix_epoch = np . datetime64 ( 0 , \"s\" ) one_second = np . timedelta64 ( 1 , \"s\" ) seconds_since_epoch = ( tm - unix_epoch ) / one_second return datetime . datetime . utcfromtimestamp ( seconds_since_epoch )","title":"to_pydatetime()"},{"location":"api/extensions/#omicron.extensions.np.top_n_argpos","text":"get top n (max->min) elements and return argpos which its value ordered in descent Examples: >>> top_n_argpos ([ np . nan , 4 , 3 , 9 , 8 , 5 , 2 , 1 , 0 , 6 , 7 ], 2 ) array ([ 3 , 4 ]) Parameters: Name Type Description Default ts np.array [description] required n int [description] required Returns: Type Description np.array [description] Source code in omicron/extensions/np.py def top_n_argpos ( ts : np . array , n : int ) -> np . array : \"\"\"get top n (max->min) elements and return argpos which its value ordered in descent Example: >>> top_n_argpos([np.nan, 4, 3, 9, 8, 5, 2, 1, 0, 6, 7], 2) array([3, 4]) Args: ts (np.array): [description] n (int): [description] Returns: np.array: [description] \"\"\" ts_ = np . copy ( ts ) ts_ [ np . isnan ( ts_ )] = - np . inf return np . 
argsort ( ts_ )[ - n :][:: - 1 ]","title":"top_n_argpos()"},{"location":"api/metrics/","text":"\u4ee5\u4e0b\u529f\u80fd\u8bf7\u4f7f\u7528empyrical\u5305\u4e2d\u76f8\u5173\u7684\u51fd\u6570\u3002 \u00b6 usage : 1 from empyrical import aggregate_returns aggregate_returns \u00b6 external link alpha \u00b6 external link alpha_aligned \u00b6 external link alpha_beta \u00b6 external link alpha_beta_aligned \u00b6 external link annual_return \u00b6 external link annual_volatility \u00b6 external link beta \u00b6 external link beta_aligned \u00b6 external link beta_fragility_heuristic \u00b6 external link beta_fragility_heuristic_aligned \u00b6 external link cagr \u00b6 external link calmar_ratio \u00b6 external link capture \u00b6 external link compute_exposures \u00b6 external link conditional_value_at_risk \u00b6 external link cum_returns \u00b6 external link cum_returns_final \u00b6 external link down_alpha_beta \u00b6 external link down_capture \u00b6 external link downside_risk \u00b6 external link excess_sharpe \u00b6 external link gpd_risk_estimates \u00b6 external link gpd_risk_estimates_aligned \u00b6 external link max_drawdown \u00b6 external link omega_ratio \u00b6 external link perf_attrib \u00b6 external link periods \u00b6 external link roll_alpha \u00b6 external link roll_alpha_aligned \u00b6 external link roll_alpha_beta \u00b6 external link roll_alpha_beta_aligned \u00b6 external link roll_annual_volatility \u00b6 external link roll_beta \u00b6 external link roll_beta_aligned \u00b6 external link roll_down_capture \u00b6 external link roll_max_drawdown \u00b6 external link roll_sharpe_ratio \u00b6 external link roll_sortino_ratio \u00b6 external link roll_up_capture \u00b6 external link roll_up_down_capture \u00b6 external link sharpe_ratio \u00b6 external link simple_returns \u00b6 external link sortino_ratio \u00b6 external link stability_of_timeseries \u00b6 external link stats \u00b6 external link tail_ratio \u00b6 external link up_alpha_beta \u00b6 external link up_capture \u00b6 external link up_down_capture \u00b6 external link utils \u00b6 external link value_at_risk \u00b6 external link","title":"metrics"},{"location":"api/metrics/#\u4ee5\u4e0b\u529f\u80fd\u8bf7\u4f7f\u7528empyrical\u5305\u4e2d\u76f8\u5173\u7684\u51fd\u6570","text":"usage : 1 from empyrical import aggregate_returns","title":"\u4ee5\u4e0b\u529f\u80fd\u8bf7\u4f7f\u7528empyrical\u5305\u4e2d\u76f8\u5173\u7684\u51fd\u6570\u3002"},{"location":"api/metrics/#aggregate_returns","text":"external link","title":"aggregate_returns"},{"location":"api/metrics/#alpha","text":"external link","title":"alpha"},{"location":"api/metrics/#alpha_aligned","text":"external link","title":"alpha_aligned"},{"location":"api/metrics/#alpha_beta","text":"external link","title":"alpha_beta"},{"location":"api/metrics/#alpha_beta_aligned","text":"external link","title":"alpha_beta_aligned"},{"location":"api/metrics/#annual_return","text":"external link","title":"annual_return"},{"location":"api/metrics/#annual_volatility","text":"external link","title":"annual_volatility"},{"location":"api/metrics/#beta","text":"external link","title":"beta"},{"location":"api/metrics/#beta_aligned","text":"external link","title":"beta_aligned"},{"location":"api/metrics/#beta_fragility_heuristic","text":"external link","title":"beta_fragility_heuristic"},{"location":"api/metrics/#beta_fragility_heuristic_aligned","text":"external link","title":"beta_fragility_heuristic_aligned"},{"location":"api/metrics/#cagr","text":"external 
link","title":"cagr"},{"location":"api/metrics/#calmar_ratio","text":"external link","title":"calmar_ratio"},{"location":"api/metrics/#capture","text":"external link","title":"capture"},{"location":"api/metrics/#compute_exposures","text":"external link","title":"compute_exposures"},{"location":"api/metrics/#conditional_value_at_risk","text":"external link","title":"conditional_value_at_risk"},{"location":"api/metrics/#cum_returns","text":"external link","title":"cum_returns"},{"location":"api/metrics/#cum_returns_final","text":"external link","title":"cum_returns_final"},{"location":"api/metrics/#down_alpha_beta","text":"external link","title":"down_alpha_beta"},{"location":"api/metrics/#down_capture","text":"external link","title":"down_capture"},{"location":"api/metrics/#downside_risk","text":"external link","title":"downside_risk"},{"location":"api/metrics/#excess_sharpe","text":"external link","title":"excess_sharpe"},{"location":"api/metrics/#gpd_risk_estimates","text":"external link","title":"gpd_risk_estimates"},{"location":"api/metrics/#gpd_risk_estimates_aligned","text":"external link","title":"gpd_risk_estimates_aligned"},{"location":"api/metrics/#max_drawdown","text":"external link","title":"max_drawdown"},{"location":"api/metrics/#omega_ratio","text":"external link","title":"omega_ratio"},{"location":"api/metrics/#perf_attrib","text":"external link","title":"perf_attrib"},{"location":"api/metrics/#periods","text":"external link","title":"periods"},{"location":"api/metrics/#roll_alpha","text":"external link","title":"roll_alpha"},{"location":"api/metrics/#roll_alpha_aligned","text":"external link","title":"roll_alpha_aligned"},{"location":"api/metrics/#roll_alpha_beta","text":"external link","title":"roll_alpha_beta"},{"location":"api/metrics/#roll_alpha_beta_aligned","text":"external link","title":"roll_alpha_beta_aligned"},{"location":"api/metrics/#roll_annual_volatility","text":"external link","title":"roll_annual_volatility"},{"location":"api/metrics/#roll_beta","text":"external link","title":"roll_beta"},{"location":"api/metrics/#roll_beta_aligned","text":"external link","title":"roll_beta_aligned"},{"location":"api/metrics/#roll_down_capture","text":"external link","title":"roll_down_capture"},{"location":"api/metrics/#roll_max_drawdown","text":"external link","title":"roll_max_drawdown"},{"location":"api/metrics/#roll_sharpe_ratio","text":"external link","title":"roll_sharpe_ratio"},{"location":"api/metrics/#roll_sortino_ratio","text":"external link","title":"roll_sortino_ratio"},{"location":"api/metrics/#roll_up_capture","text":"external link","title":"roll_up_capture"},{"location":"api/metrics/#roll_up_down_capture","text":"external link","title":"roll_up_down_capture"},{"location":"api/metrics/#sharpe_ratio","text":"external link","title":"sharpe_ratio"},{"location":"api/metrics/#simple_returns","text":"external link","title":"simple_returns"},{"location":"api/metrics/#sortino_ratio","text":"external link","title":"sortino_ratio"},{"location":"api/metrics/#stability_of_timeseries","text":"external link","title":"stability_of_timeseries"},{"location":"api/metrics/#stats","text":"external link","title":"stats"},{"location":"api/metrics/#tail_ratio","text":"external link","title":"tail_ratio"},{"location":"api/metrics/#up_alpha_beta","text":"external link","title":"up_alpha_beta"},{"location":"api/metrics/#up_capture","text":"external link","title":"up_capture"},{"location":"api/metrics/#up_down_capture","text":"external 
link","title":"up_down_capture"},{"location":"api/metrics/#utils","text":"external link","title":"utils"},{"location":"api/metrics/#value_at_risk","text":"external link","title":"value_at_risk"},{"location":"api/omicron/","text":"Omicron\u63d0\u4f9b\u6570\u636e\u6301\u4e45\u5316\u3001\u65f6\u95f4\uff08\u65e5\u5386\u3001triggers)\u3001\u884c\u60c5\u6570\u636emodel\u3001\u57fa\u7840\u8fd0\u7b97\u548c\u57fa\u7840\u91cf\u5316\u56e0\u5b50 close () async \u00b6 \u5173\u95ed\u4e0e\u7f13\u5b58\u7684\u8fde\u63a5 Source code in omicron/__init__.py async def close (): \"\"\"\u5173\u95ed\u4e0e\u7f13\u5b58\u7684\u8fde\u63a5\"\"\" try : await cache . close () except Exception as e : # noqa pass init ( app_cache = 5 ) async \u00b6 \u521d\u59cb\u5316Omicron \u521d\u59cb\u5316influxDB, \u7f13\u5b58\u7b49\u8fde\u63a5\uff0c \u5e76\u52a0\u8f7d\u65e5\u5386\u548c\u8bc1\u5238\u5217\u8868 \u4e0a\u8ff0\u521d\u59cb\u5316\u7684\u8fde\u63a5\uff0c\u5e94\u8be5\u5728\u7a0b\u5e8f\u9000\u51fa\u65f6\uff0c\u901a\u8fc7\u8c03\u7528 close() \u5173\u95ed Source code in omicron/__init__.py async def init ( app_cache : int = 5 ): \"\"\"\u521d\u59cb\u5316Omicron \u521d\u59cb\u5316influxDB, \u7f13\u5b58\u7b49\u8fde\u63a5\uff0c \u5e76\u52a0\u8f7d\u65e5\u5386\u548c\u8bc1\u5238\u5217\u8868 \u4e0a\u8ff0\u521d\u59cb\u5316\u7684\u8fde\u63a5\uff0c\u5e94\u8be5\u5728\u7a0b\u5e8f\u9000\u51fa\u65f6\uff0c\u901a\u8fc7\u8c03\u7528`close()`\u5173\u95ed \"\"\" global cache await cache . init ( app = app_cache ) await tf . init () from omicron.models.security import Security await Security . init () Extensions package \u00b6 decimals \u00b6 math_round ( x , digits ) \u00b6 \u7531\u4e8e\u6d6e\u70b9\u6570\u7684\u8868\u793a\u95ee\u9898\uff0c\u5f88\u591a\u8bed\u8a00\u7684round\u51fd\u6570\u4e0e\u6570\u5b66\u4e0a\u7684round\u51fd\u6570\u4e0d\u4e00\u81f4\u3002\u4e0b\u9762\u7684\u51fd\u6570\u7ed3\u679c\u4e0e\u6570\u5b66\u4e0a\u7684\u4e00\u81f4\u3002 Parameters: Name Type Description Default x float \u8981\u8fdb\u884c\u56db\u820d\u4e94\u5165\u7684\u6570\u5b57 required digits int \u5c0f\u6570\u70b9\u540e\u4fdd\u7559\u7684\u4f4d\u6570 required Source code in omicron/extensions/decimals.py def math_round ( x : float , digits : int ): \"\"\"\u7531\u4e8e\u6d6e\u70b9\u6570\u7684\u8868\u793a\u95ee\u9898\uff0c\u5f88\u591a\u8bed\u8a00\u7684round\u51fd\u6570\u4e0e\u6570\u5b66\u4e0a\u7684round\u51fd\u6570\u4e0d\u4e00\u81f4\u3002\u4e0b\u9762\u7684\u51fd\u6570\u7ed3\u679c\u4e0e\u6570\u5b66\u4e0a\u7684\u4e00\u81f4\u3002 Args: x: \u8981\u8fdb\u884c\u56db\u820d\u4e94\u5165\u7684\u6570\u5b57 digits: \u5c0f\u6570\u70b9\u540e\u4fdd\u7559\u7684\u4f4d\u6570 \"\"\" return int ( x * ( 10 ** digits ) + copysign ( 0.5 , x )) / ( 10 ** digits ) price_equal ( x , y ) \u00b6 \u5224\u65ad\u80a1\u4ef7\u662f\u5426\u76f8\u7b49 Parameters: Name Type Description Default x \u4ef7\u683c1 required y \u4ef7\u683c2 required Returns: Type Description bool \u5982\u679c\u76f8\u7b49\u5219\u8fd4\u56deTrue\uff0c\u5426\u5219\u8fd4\u56deFalse Source code in omicron/extensions/decimals.py def price_equal ( x : float , y : float ) -> bool : \"\"\"\u5224\u65ad\u80a1\u4ef7\u662f\u5426\u76f8\u7b49 Args: x : \u4ef7\u683c1 y : \u4ef7\u683c2 Returns: \u5982\u679c\u76f8\u7b49\u5219\u8fd4\u56deTrue\uff0c\u5426\u5219\u8fd4\u56deFalse \"\"\" return abs ( math_round ( x , 2 ) - math_round ( y , 2 )) < 1e-2 np \u00b6 Extension function related to numpy array_math_round ( arr , digits ) \u00b6 \u5c06\u4e00\u7ef4\u6570\u7ec4arr\u7684\u6570\u636e\u8fdb\u884c\u56db\u820d\u4e94\u5165 
numpy.around\u7684\u51fd\u6570\u5e76\u4e0d\u662f\u6570\u5b66\u4e0a\u7684\u56db\u820d\u4e94\u5165\uff0c\u5bf91.5\u548c2.5\u8fdb\u884cround\u7684\u7ed3\u679c\u90fd\u4f1a\u53d8\u62102\uff0c\u5728\u91d1\u878d\u9886\u57df\u8ba1\u7b97\u4e2d\uff0c\u6211\u4eec\u5fc5\u987b\u4f7f\u7528\u6570\u5b66\u610f\u4e49\u4e0a\u7684\u56db\u820d\u4e94\u5165\u3002 Parameters: Name Type Description Default arr ArrayLike \u8f93\u5165\u6570\u7ec4 required digits int required Returns: Type Description np.ndarray \u56db\u820d\u4e94\u5165\u540e\u7684\u4e00\u7ef4\u6570\u7ec4 Source code in omicron/extensions/np.py def array_math_round ( arr : Union [ float , ArrayLike ], digits : int ) -> np . ndarray : \"\"\"\u5c06\u4e00\u7ef4\u6570\u7ec4arr\u7684\u6570\u636e\u8fdb\u884c\u56db\u820d\u4e94\u5165 numpy.around\u7684\u51fd\u6570\u5e76\u4e0d\u662f\u6570\u5b66\u4e0a\u7684\u56db\u820d\u4e94\u5165\uff0c\u5bf91.5\u548c2.5\u8fdb\u884cround\u7684\u7ed3\u679c\u90fd\u4f1a\u53d8\u62102\uff0c\u5728\u91d1\u878d\u9886\u57df\u8ba1\u7b97\u4e2d\uff0c\u6211\u4eec\u5fc5\u987b\u4f7f\u7528\u6570\u5b66\u610f\u4e49\u4e0a\u7684\u56db\u820d\u4e94\u5165\u3002 Args: arr (ArrayLike): \u8f93\u5165\u6570\u7ec4 digits (int): Returns: np.ndarray: \u56db\u820d\u4e94\u5165\u540e\u7684\u4e00\u7ef4\u6570\u7ec4 \"\"\" # \u5982\u679c\u662f\u5355\u4e2a\u5143\u7d20\uff0c\u5219\u76f4\u63a5\u8fd4\u56de if isinstance ( arr , float ): return decimals . math_round ( arr , digits ) f = np . vectorize ( lambda x : decimals . math_round ( x , digits )) return f ( arr ) array_price_equal ( price1 , price2 ) \u00b6 \u5224\u65ad\u4e24\u4e2a\u4ef7\u683c\u6570\u7ec4\u662f\u5426\u76f8\u7b49 Parameters: Name Type Description Default price1 ArrayLike \u4ef7\u683c\u6570\u7ec4 required price2 ArrayLike \u4ef7\u683c\u6570\u7ec4 required Returns: Type Description np.ndarray \u5224\u65ad\u7ed3\u679c Source code in omicron/extensions/np.py def array_price_equal ( price1 : ArrayLike , price2 : ArrayLike ) -> np . ndarray : \"\"\"\u5224\u65ad\u4e24\u4e2a\u4ef7\u683c\u6570\u7ec4\u662f\u5426\u76f8\u7b49 Args: price1 (ArrayLike): \u4ef7\u683c\u6570\u7ec4 price2 (ArrayLike): \u4ef7\u683c\u6570\u7ec4 Returns: np.ndarray: \u5224\u65ad\u7ed3\u679c \"\"\" price1 = array_math_round ( price1 , 2 ) price2 = array_math_round ( price2 , 2 ) return abs ( price1 - price2 ) < 1e-2 bars_since ( condition , default = None ) \u00b6 Return the number of bars since condition sequence was last True , or if never, return default . 1 2 3 >>> condition = [True, True, False] >>> bars_since(condition) 1 Source code in omicron/extensions/np.py def bars_since ( condition : Sequence [ bool ], default = None ) -> int : \"\"\" Return the number of bars since `condition` sequence was last `True`, or if never, return `default`. >>> condition = [True, True, False] >>> bars_since(condition) 1 \"\"\" return next ( compress ( range ( len ( condition )), reversed ( condition )), default ) bin_cut ( arr , n ) \u00b6 \u5c06\u6570\u7ec4arr\u5207\u5206\u6210n\u4efd todo: use padding + reshape to boost performance Parameters: Name Type Description Default arr [type] [description] required n [type] [description] required Returns: Type Description [type] [description] Source code in omicron/extensions/np.py def bin_cut ( arr : list , n : int ): \"\"\"\u5c06\u6570\u7ec4arr\u5207\u5206\u6210n\u4efd todo: use padding + reshape to boost performance Args: arr ([type]): [description] n ([type]): [description] Returns: [type]: [description] \"\"\" result = [[] for i in range ( n )] for i , e in enumerate ( arr ): result [ i % n ] . 
append ( e ) return [ e for e in result if len ( e )] count_between ( arr , start , end ) \u00b6 \u8ba1\u7b97\u6570\u7ec4\u4e2d\uff0c start \u5143\u7d20\u4e0e end \u5143\u7d20\u4e4b\u95f4\u5171\u6709\u591a\u5c11\u4e2a\u5143\u7d20 \u8981\u6c42arr\u5fc5\u987b\u662f\u5df2\u6392\u5e8f\u3002\u8ba1\u7b97\u7ed3\u679c\u4f1a\u5305\u542b\u533a\u95f4\u8fb9\u754c\u70b9\u3002 Examples: >>> arr = [ 20050104 , 20050105 , 20050106 , 20050107 , 20050110 , 20050111 ] >>> count_between ( arr , 20050104 , 20050111 ) 6 >>> count_between ( arr , 20050104 , 20050109 ) 4 Source code in omicron/extensions/np.py def count_between ( arr , start , end ): \"\"\"\u8ba1\u7b97\u6570\u7ec4\u4e2d\uff0c`start`\u5143\u7d20\u4e0e`end`\u5143\u7d20\u4e4b\u95f4\u5171\u6709\u591a\u5c11\u4e2a\u5143\u7d20 \u8981\u6c42arr\u5fc5\u987b\u662f\u5df2\u6392\u5e8f\u3002\u8ba1\u7b97\u7ed3\u679c\u4f1a\u5305\u542b\u533a\u95f4\u8fb9\u754c\u70b9\u3002 Examples: >>> arr = [20050104, 20050105, 20050106, 20050107, 20050110, 20050111] >>> count_between(arr, 20050104, 20050111) 6 >>> count_between(arr, 20050104, 20050109) 4 \"\"\" pos_start = np . searchsorted ( arr , start , side = \"right\" ) pos_end = np . searchsorted ( arr , end , side = \"right\" ) counter = pos_end - pos_start + 1 if start < arr [ 0 ]: counter -= 1 if end > arr [ - 1 ]: counter -= 1 return counter dataframe_to_structured_array ( df , dtypes = None ) \u00b6 convert dataframe (with all columns, and index possibly) to numpy structured arrays len(dtypes) should be either equal to len(df.columns) or len(df.columns) + 1 . In the later case, it implies to include df.index into converted array. Parameters: Name Type Description Default df DataFrame the one needs to be converted required dtypes List[Tuple] Defaults to None. If it's None , then dtypes of df is used, in such case, the index of df will not be converted. None Returns: Type Description ArrayLike [description] Source code in omicron/extensions/np.py def dataframe_to_structured_array ( df : DataFrame , dtypes : List [ Tuple ] = None ) -> ArrayLike : \"\"\"convert dataframe (with all columns, and index possibly) to numpy structured arrays `len(dtypes)` should be either equal to `len(df.columns)` or `len(df.columns) + 1`. In the later case, it implies to include `df.index` into converted array. Args: df: the one needs to be converted dtypes: Defaults to None. If it's `None`, then dtypes of `df` is used, in such case, the `index` of `df` will not be converted. Returns: ArrayLike: [description] \"\"\" v = df if dtypes is not None : dtypes_in_dict = { key : value for key , value in dtypes } col_len = len ( df . columns ) if len ( dtypes ) == col_len + 1 : v = df . reset_index () rename_index_to = set ( dtypes_in_dict . keys ()) . difference ( set ( df . columns )) v . rename ( columns = { \"index\" : list ( rename_index_to )[ 0 ]}, inplace = True ) elif col_len != len ( dtypes ): raise ValueError ( f \"length of dtypes should be either { col_len } or { col_len + 1 } , is { len ( dtypes ) } \" ) # re-arrange order of dtypes, in order to align with df.columns dtypes = [] for name in v . columns : dtypes . append (( name , dtypes_in_dict [ name ])) else : dtypes = df . dtypes return np . array ( np . rec . fromrecords ( v . values ), dtype = dtypes ) dict_to_numpy_array ( d , dtype ) \u00b6 convert dictionary to numpy array Examples: d = {\"aaron\": 5, \"jack\": 6} dtype = [(\"name\", \"S8\"), (\"score\", \" np . 
array : \"\"\"convert dictionary to numpy array Examples: >>> d = {\"aaron\": 5, \"jack\": 6} >>> dtype = [(\"name\", \"S8\"), (\"score\", \">> dict_to_numpy_array(d, dtype) array([(b'aaron', 5), (b'jack', 6)], dtype=[('name', 'S8'), ('score', '>> arr = np . arange ( 6 , dtype = np . float32 ) >>> arr [ 3 : 5 ] = np . NaN >>> fill_nan ( arr ) ... array ([ 0. , 1. , 2. , 2. , 2. , 5. ], dtype = float32 ) >>> arr = np . arange ( 6 , dtype = np . float32 ) >>> arr [ 0 : 2 ] = np . nan >>> fill_nan ( arr ) ... array ([ 2. , 2. , 2. , 3. , 4. , 5. ], dtype = float32 ) Parameters: Name Type Description Default ts np.array [description] required Source code in omicron/extensions/np.py def fill_nan ( ts : np . ndarray ): \"\"\"\u5c06ts\u4e2d\u7684NaN\u66ff\u6362\u4e3a\u5176\u524d\u503c \u5982\u679cts\u8d77\u5934\u7684\u5143\u7d20\u4e3aNaN\uff0c\u5219\u7528\u7b2c\u4e00\u4e2a\u975eNaN\u5143\u7d20\u66ff\u6362\u3002 \u5982\u679c\u6240\u6709\u5143\u7d20\u90fd\u4e3aNaN\uff0c\u5219\u65e0\u6cd5\u66ff\u6362\u3002 Example: >>> arr = np.arange(6, dtype=np.float32) >>> arr[3:5] = np.NaN >>> fill_nan(arr) ... # doctest: +NORMALIZE_WHITESPACE array([0., 1., 2., 2., 2., 5.], dtype=float32) >>> arr = np.arange(6, dtype=np.float32) >>> arr[0:2] = np.nan >>> fill_nan(arr) ... # doctest: +NORMALIZE_WHITESPACE array([2., 2., 2., 3., 4., 5.], dtype=float32) Args: ts (np.array): [description] \"\"\" if np . all ( np . isnan ( ts )): raise ValueError ( \"all of ts are NaN\" ) if ts [ 0 ] is None or math . isnan ( ts [ 0 ]): idx = np . argwhere ( ~ np . isnan ( ts ))[ 0 ] ts [ 0 ] = ts [ idx ] mask = np . isnan ( ts ) idx = np . where ( ~ mask , np . arange ( mask . size ), 0 ) np . maximum . accumulate ( idx , out = idx ) return ts [ idx ] find_runs ( x ) \u00b6 Find runs of consecutive items in an array. Parameters: Name Type Description Default x ArrayLike the sequence to find runs in required Returns: Type Description Tuple[np.ndarray, np.ndarray, np.ndarray] A tuple of unique values, start indices, and length of runs Source code in omicron/extensions/np.py def find_runs ( x : ArrayLike ) -> Tuple [ np . ndarray , np . ndarray , np . ndarray ]: \"\"\"Find runs of consecutive items in an array. Args: x: the sequence to find runs in Returns: A tuple of unique values, start indices, and length of runs \"\"\" # ensure array x = np . asanyarray ( x ) if x . ndim != 1 : raise ValueError ( \"only 1D array supported\" ) n = x . shape [ 0 ] # handle empty array if n == 0 : return np . array ([]), np . array ([]), np . array ([]) else : # find run starts loc_run_start = np . empty ( n , dtype = bool ) loc_run_start [ 0 ] = True np . not_equal ( x [: - 1 ], x [ 1 :], out = loc_run_start [ 1 :]) run_starts = np . nonzero ( loc_run_start )[ 0 ] # find run values run_values = x [ loc_run_start ] # find run lengths run_lengths = np . diff ( np . append ( run_starts , n )) return run_values , run_starts , run_lengths floor ( arr , item ) \u00b6 \u5728\u6570\u636earr\u4e2d\uff0c\u627e\u5230\u5c0f\u4e8e\u7b49\u4e8eitem\u7684\u90a3\u4e00\u4e2a\u503c\u3002\u5982\u679citem\u5c0f\u4e8e\u6240\u6709arr\u5143\u7d20\u7684\u503c\uff0c\u8fd4\u56dearr[0];\u5982\u679citem \u5927\u4e8e\u6240\u6709arr\u5143\u7d20\u7684\u503c\uff0c\u8fd4\u56dearr[-1] \u4e0e minute_frames_floor \u4e0d\u540c\u7684\u662f\uff0c\u672c\u51fd\u6570\u4e0d\u505a\u56de\u7ed5\u4e0e\u8fdb\u4f4d. 
Examples: >>> a = [ 3 , 6 , 9 ] >>> floor ( a , - 1 ) 3 >>> floor ( a , 9 ) 9 >>> floor ( a , 10 ) 9 >>> floor ( a , 4 ) 3 >>> floor ( a , 10 ) 9 Parameters: Name Type Description Default arr required item required Source code in omicron/extensions/np.py def floor ( arr , item ): \"\"\" \u5728\u6570\u636earr\u4e2d\uff0c\u627e\u5230\u5c0f\u4e8e\u7b49\u4e8eitem\u7684\u90a3\u4e00\u4e2a\u503c\u3002\u5982\u679citem\u5c0f\u4e8e\u6240\u6709arr\u5143\u7d20\u7684\u503c\uff0c\u8fd4\u56dearr[0];\u5982\u679citem \u5927\u4e8e\u6240\u6709arr\u5143\u7d20\u7684\u503c\uff0c\u8fd4\u56dearr[-1] \u4e0e`minute_frames_floor`\u4e0d\u540c\u7684\u662f\uff0c\u672c\u51fd\u6570\u4e0d\u505a\u56de\u7ed5\u4e0e\u8fdb\u4f4d. Examples: >>> a = [3, 6, 9] >>> floor(a, -1) 3 >>> floor(a, 9) 9 >>> floor(a, 10) 9 >>> floor(a, 4) 3 >>> floor(a,10) 9 Args: arr: item: Returns: \"\"\" if item < arr [ 0 ]: return arr [ 0 ] index = np . searchsorted ( arr , item , side = \"right\" ) return arr [ index - 1 ] join_by_left ( key , r1 , r2 , mask = True ) \u00b6 \u5de6\u8fde\u63a5 r1 , r2 by key \u5982\u679c r1 \u4e2d\u5b58\u5728 r2 \u4e2d\u6ca1\u6709\u7684\u884c\uff0c\u5219\u8be5\u884c\u5bf9\u5e94\u7684 r2 \u4e2d\u7684\u90a3\u4e9b\u5b57\u6bb5\u5c06\u88abmask\uff0c\u6216\u8005\u586b\u5145\u968f\u673a\u6570\u3002 same as numpy.lib.recfunctions.join_by(key, r1, r2, jointype='leftouter'), but allows r1 have duplicate keys Reference: stackoverflow Examples: >>> # to join the following >>> # [[ 1, 2], >>> # [ 1, 3], x [[1, 5], >>> # [ 2, 3]] [4, 7]] >>> # only first two rows in left will be joined >>> r1 = np . array ([( 1 , 2 ), ( 1 , 3 ), ( 2 , 3 )], dtype = [( 'seq' , 'i4' ), ( 'score' , 'i4' )]) >>> r2 = np . array ([( 1 , 5 ), ( 4 , 7 )], dtype = [( 'seq' , 'i4' ), ( 'age' , 'i4' )]) >>> joined = join_by_left ( 'seq' , r1 , r2 ) >>> print ( joined ) [( 1 , 2 , 5 ) ( 1 , 3 , 5 ) ( 2 , 3 , -- )] >>> print ( joined . dtype ) ( numpy . record , [( 'seq' , '>> joined [ 2 ][ 2 ] masked >>> joined . tolist ()[ 2 ][ 2 ] == None True Parameters: Name Type Description Default key join\u5173\u952e\u5b57 required r1 \u6570\u636e\u96c61 required r2 \u6570\u636e\u96c62 required Returns: Type Description a numpy array Source code in omicron/extensions/np.py def join_by_left ( key , r1 , r2 , mask = True ): \"\"\"\u5de6\u8fde\u63a5 `r1`, `r2` by `key` \u5982\u679c`r1`\u4e2d\u5b58\u5728`r2`\u4e2d\u6ca1\u6709\u7684\u884c\uff0c\u5219\u8be5\u884c\u5bf9\u5e94\u7684`r2`\u4e2d\u7684\u90a3\u4e9b\u5b57\u6bb5\u5c06\u88abmask\uff0c\u6216\u8005\u586b\u5145\u968f\u673a\u6570\u3002 same as numpy.lib.recfunctions.join_by(key, r1, r2, jointype='leftouter'), but allows r1 have duplicate keys [Reference: stackoverflow](https://stackoverflow.com/a/53261882/13395693) Examples: >>> # to join the following >>> # [[ 1, 2], >>> # [ 1, 3], x [[1, 5], >>> # [ 2, 3]] [4, 7]] >>> # only first two rows in left will be joined >>> r1 = np.array([(1, 2), (1,3), (2,3)], dtype=[('seq', 'i4'), ('score', 'i4')]) >>> r2 = np.array([(1, 5), (4,7)], dtype=[('seq', 'i4'), ('age', 'i4')]) >>> joined = join_by_left('seq', r1, r2) >>> print(joined) [(1, 2, 5) (1, 3, 5) (2, 3, --)] >>> print(joined.dtype) (numpy.record, [('seq', '>> joined[2][2] masked >>> joined.tolist()[2][2] == None True Args: key : join\u5173\u952e\u5b57 r1 : \u6570\u636e\u96c61 r2 : \u6570\u636e\u96c62 Returns: a numpy array \"\"\" # figure out the dtype of the result array descr1 = r1 . dtype . descr descr2 = [ d for d in r2 . dtype . descr if d [ 0 ] not in r1 . dtype . 
names ] descrm = descr1 + descr2 # figure out the fields we'll need from each array f1 = [ d [ 0 ] for d in descr1 ] f2 = [ d [ 0 ] for d in descr2 ] # cache the number of columns in f1 ncol1 = len ( f1 ) # get a dict of the rows of r2 grouped by key rows2 = {} for row2 in r2 : rows2 . setdefault ( row2 [ key ], []) . append ( row2 ) # figure out how many rows will be in the result nrowm = 0 for k1 in r1 [ key ]: if k1 in rows2 : nrowm += len ( rows2 [ k1 ]) else : nrowm += 1 # allocate the return array # ret = np.full((nrowm, ), fill, dtype=descrm) _ret = np . recarray ( nrowm , dtype = descrm ) if mask : ret = np . ma . array ( _ret , mask = True ) else : ret = _ret # merge the data into the return array i = 0 for row1 in r1 : if row1 [ key ] in rows2 : for row2 in rows2 [ row1 [ key ]]: ret [ i ] = tuple ( row1 [ f1 ]) + tuple ( row2 [ f2 ]) i += 1 else : for j in range ( ncol1 ): ret [ i ][ j ] = row1 [ j ] i += 1 return ret numpy_append_fields ( base , names , data , dtypes ) \u00b6 \u7ed9\u73b0\u6709\u7684\u6570\u7ec4 base \u589e\u52a0\u65b0\u7684\u5b57\u6bb5 \u5b9e\u73b0\u4e86 numpy.lib.recfunctions.rec_append_fields \u7684\u529f\u80fd\u3002\u63d0\u4f9b\u8fd9\u4e2a\u529f\u80fd\uff0c\u662f\u56e0\u4e3a rec_append_fields \u4e0d\u80fd\u5904\u7406 data \u5143\u7d20\u7684\u7c7b\u578b\u4e3aObject\u7684\u60c5\u51b5\u3002 \u65b0\u589e\u7684\u6570\u636e\u5217\u5c06\u987a\u5e8f\u6392\u5217\u5728\u5176\u5b83\u5217\u7684\u53f3\u8fb9\u3002 Examples: >>> # \u65b0\u589e\u5355\u4e2a\u5b57\u6bb5 >>> import numpy >>> old = np . array ([ i for i in range ( 3 )], dtype = [( 'col1' , '>> new_list = [ 2 * i for i in range ( 3 )] >>> res = numpy_append_fields ( old , 'new_col' , new_list , [( 'new_col' , '>> print ( res ) ... [( 0. , 0. ) ( 1. , 2. ) ( 2. , 4. )] >>> # \u65b0\u589e\u591a\u4e2a\u5b57\u6bb5 >>> data = [ res [ 'col1' ] . tolist (), res [ 'new_col' ] . tolist ()] >>> print ( numpy_append_fields ( old , ( 'col3' , 'col4' ), data , [( 'col3' , ' np . ndarray : \"\"\"\u7ed9\u73b0\u6709\u7684\u6570\u7ec4`base`\u589e\u52a0\u65b0\u7684\u5b57\u6bb5 \u5b9e\u73b0\u4e86`numpy.lib.recfunctions.rec_append_fields`\u7684\u529f\u80fd\u3002\u63d0\u4f9b\u8fd9\u4e2a\u529f\u80fd\uff0c\u662f\u56e0\u4e3a`rec_append_fields`\u4e0d\u80fd\u5904\u7406`data`\u5143\u7d20\u7684\u7c7b\u578b\u4e3aObject\u7684\u60c5\u51b5\u3002 \u65b0\u589e\u7684\u6570\u636e\u5217\u5c06\u987a\u5e8f\u6392\u5217\u5728\u5176\u5b83\u5217\u7684\u53f3\u8fb9\u3002 Example: >>> # \u65b0\u589e\u5355\u4e2a\u5b57\u6bb5 >>> import numpy >>> old = np.array([i for i in range(3)], dtype=[('col1', '>> new_list = [2 * i for i in range(3)] >>> res = numpy_append_fields(old, 'new_col', new_list, [('new_col', '>> print(res) ... # doctest: +NORMALIZE_WHITESPACE [(0., 0.) (1., 2.) (2., 4.)] >>> # \u65b0\u589e\u591a\u4e2a\u5b57\u6bb5 >>> data = [res['col1'].tolist(), res['new_col'].tolist()] >>> print(numpy_append_fields(old, ('col3', 'col4'), data, [('col3', ' np . ndarray : \"\"\"\u4ece`ts`\u4e2d\u53bb\u9664NaN Args: ts (np.array): [description] Returns: np.array: [description] \"\"\" return ts [ ~ np . isnan ( ts . astype ( float ))] replace_zero ( ts , replacement = None ) \u00b6 \u5c06ts\u4e2d\u76840\u66ff\u6362\u4e3a\u524d\u503c, \u5904\u7406volume\u6570\u636e\u65f6\u5e38\u7528\u7528\u5230 \u5982\u679c\u63d0\u4f9b\u4e86replacement, \u5219\u66ff\u6362\u4e3areplacement Source code in omicron/extensions/np.py def replace_zero ( ts : np . ndarray , replacement = None ) -> np . 
ndarray : \"\"\"\u5c06ts\u4e2d\u76840\u66ff\u6362\u4e3a\u524d\u503c, \u5904\u7406volume\u6570\u636e\u65f6\u5e38\u7528\u7528\u5230 \u5982\u679c\u63d0\u4f9b\u4e86replacement, \u5219\u66ff\u6362\u4e3areplacement \"\"\" if replacement is not None : return np . where ( ts == 0 , replacement , ts ) if np . all ( ts == 0 ): raise ValueError ( \"all of ts are 0\" ) if ts [ 0 ] == 0 : idx = np . argwhere ( ts != 0 )[ 0 ] ts [ 0 ] = ts [ idx ] mask = ts == 0 idx = np . where ( ~ mask , np . arange ( mask . size ), 0 ) np . maximum . accumulate ( idx , out = idx ) return ts [ idx ] rolling ( x , win , func ) \u00b6 \u5bf9\u5e8f\u5217 x \u8fdb\u884c\u7a97\u53e3\u6ed1\u52a8\u8ba1\u7b97\u3002 \u5982\u679c func \u8981\u5b9e\u73b0\u7684\u529f\u80fd\u662fargmax, argmin, max, mean, median, min, rank, std, sum, var\u7b49\uff0cmove_argmax\uff0c\u8bf7\u4f7f\u7528bottleneck\u4e2d\u7684move_argmin, move_max, move_mean, move_median, move_min move_rank, move_std, move_sum, move_var\u3002\u8fd9\u4e9b\u51fd\u6570\u7684\u6027\u80fd\u66f4\u597d\u3002 Parameters: Name Type Description Default x [type] [description] required win [type] [description] required func [type] [description] required Returns: Type Description [type] [description] Source code in omicron/extensions/np.py def rolling ( x , win , func ): \"\"\"\u5bf9\u5e8f\u5217`x`\u8fdb\u884c\u7a97\u53e3\u6ed1\u52a8\u8ba1\u7b97\u3002 \u5982\u679c`func`\u8981\u5b9e\u73b0\u7684\u529f\u80fd\u662fargmax, argmin, max, mean, median, min, rank, std, sum, var\u7b49\uff0cmove_argmax\uff0c\u8bf7\u4f7f\u7528bottleneck\u4e2d\u7684move_argmin, move_max, move_mean, move_median, move_min move_rank, move_std, move_sum, move_var\u3002\u8fd9\u4e9b\u51fd\u6570\u7684\u6027\u80fd\u66f4\u597d\u3002 Args: x ([type]): [description] win ([type]): [description] func ([type]): [description] Returns: [type]: [description] \"\"\" results = [] for subarray in sliding_window_view ( x , window_shape = win ): results . append ( func ( subarray )) return np . 
array ( results ) shift ( arr , start , offset ) \u00b6 \u5728numpy\u6570\u7ec4arr\u4e2d\uff0c\u627e\u5230start(\u6216\u8005\u6700\u63a5\u8fd1\u7684\u4e00\u4e2a\uff09\uff0c\u53d6offset\u5bf9\u5e94\u7684\u5143\u7d20\u3002 \u8981\u6c42 arr \u5df2\u6392\u5e8f\u3002 offset \u4e3a\u6b63\uff0c\u8868\u660e\u5411\u540e\u79fb\u4f4d\uff1b offset \u4e3a\u8d1f\uff0c\u8868\u660e\u5411\u524d\u79fb\u4f4d Examples: >>> arr = [ 20050104 , 20050105 , 20050106 , 20050107 , 20050110 , 20050111 ] >>> shift ( arr , 20050104 , 1 ) 20050105 >>> shift ( arr , 20050105 , - 1 ) 20050104 >>> # \u8d77\u59cb\u70b9\u5df2\u53f3\u8d8a\u754c\uff0c\u4e14\u5411\u53f3shift\uff0c\u8fd4\u56de\u8d77\u59cb\u70b9 >>> shift ( arr , 20050120 , 1 ) 20050120 Parameters: Name Type Description Default arr \u5df2\u6392\u5e8f\u7684\u6570\u7ec4 required start numpy\u53ef\u63a5\u53d7\u7684\u6570\u636e\u7c7b\u578b required offset int [description] required Returns: Type Description \u79fb\u4f4d\u540e\u5f97\u5230\u7684\u5143\u7d20\u503c Source code in omicron/extensions/np.py def shift ( arr , start , offset ): \"\"\"\u5728numpy\u6570\u7ec4arr\u4e2d\uff0c\u627e\u5230start(\u6216\u8005\u6700\u63a5\u8fd1\u7684\u4e00\u4e2a\uff09\uff0c\u53d6offset\u5bf9\u5e94\u7684\u5143\u7d20\u3002 \u8981\u6c42`arr`\u5df2\u6392\u5e8f\u3002`offset`\u4e3a\u6b63\uff0c\u8868\u660e\u5411\u540e\u79fb\u4f4d\uff1b`offset`\u4e3a\u8d1f\uff0c\u8868\u660e\u5411\u524d\u79fb\u4f4d Examples: >>> arr = [20050104, 20050105, 20050106, 20050107, 20050110, 20050111] >>> shift(arr, 20050104, 1) 20050105 >>> shift(arr, 20050105, -1) 20050104 >>> # \u8d77\u59cb\u70b9\u5df2\u53f3\u8d8a\u754c\uff0c\u4e14\u5411\u53f3shift\uff0c\u8fd4\u56de\u8d77\u59cb\u70b9 >>> shift(arr, 20050120, 1) 20050120 Args: arr : \u5df2\u6392\u5e8f\u7684\u6570\u7ec4 start : numpy\u53ef\u63a5\u53d7\u7684\u6570\u636e\u7c7b\u578b offset (int): [description] Returns: \u79fb\u4f4d\u540e\u5f97\u5230\u7684\u5143\u7d20\u503c \"\"\" pos = np . searchsorted ( arr , start , side = \"right\" ) if pos + offset - 1 >= len ( arr ): return start else : return arr [ pos + offset - 1 ] smallest_n_argpos ( ts , n ) \u00b6 get smallest n (min->max) elements and return argpos which its value ordered in ascent Examples: >>> smallest_n_argpos ([ np . nan , 4 , 3 , 9 , 8 , 5 , 2 , 1 , 0 , 6 , 7 ], 2 ) array ([ 8 , 7 ]) Parameters: Name Type Description Default ts np.array \u8f93\u5165\u7684\u6570\u7ec4 required n int \u53d6\u6700\u5c0f\u7684n\u4e2a\u5143\u7d20 required Returns: Type Description np.array [description] Source code in omicron/extensions/np.py def smallest_n_argpos ( ts : np . array , n : int ) -> np . array : \"\"\"get smallest n (min->max) elements and return argpos which its value ordered in ascent Example: >>> smallest_n_argpos([np.nan, 4, 3, 9, 8, 5, 2, 1, 0, 6, 7], 2) array([8, 7]) Args: ts (np.array): \u8f93\u5165\u7684\u6570\u7ec4 n (int): \u53d6\u6700\u5c0f\u7684n\u4e2a\u5143\u7d20 Returns: np.array: [description] \"\"\" return np . 
argsort ( ts )[: n ] to_pydatetime ( tm ) \u00b6 \u5c06numpy.datetime64\u5bf9\u8c61\u8f6c\u6362\u6210\u4e3apython\u7684datetime\u5bf9\u8c61 numpy.ndarray.item()\u65b9\u6cd5\u53ef\u7528\u4ee5\u5c06\u4efb\u4f55numpy\u5bf9\u8c61\u8f6c\u6362\u6210python\u5bf9\u8c61\uff0c\u63a8\u8350\u5728\u4efb\u4f55\u9002\u7528\u7684\u5730\u65b9\u4f7f\u7528.item()\u65b9\u6cd5\uff0c\u800c\u4e0d\u662f\u672c\u65b9\u6cd5\u3002\u793a\u4f8b: 1 2 3 4 arr = np.array(['2022-09-08', '2022-09-09'], dtype='datetime64[s]') arr.item(0) # output is datetime.datetime(2022, 9, 8, 0, 0) arr[1].item() # output is datetime.datetime(2022, 9, 9, 0, 0) Parameters: Name Type Description Default tm the input numpy datetime object required Returns: Type Description datetime.datetime python datetime object .. deprecated:: 2.0.0 use tm.item() instead Source code in omicron/extensions/np.py @deprecated ( \"2.0.0\" , details = \"use `tm.item()` instead\" ) def to_pydatetime ( tm : np . datetime64 ) -> datetime . datetime : \"\"\"\u5c06numpy.datetime64\u5bf9\u8c61\u8f6c\u6362\u6210\u4e3apython\u7684datetime\u5bf9\u8c61 numpy.ndarray.item()\u65b9\u6cd5\u53ef\u7528\u4ee5\u5c06\u4efb\u4f55numpy\u5bf9\u8c61\u8f6c\u6362\u6210python\u5bf9\u8c61\uff0c\u63a8\u8350\u5728\u4efb\u4f55\u9002\u7528\u7684\u5730\u65b9\u4f7f\u7528.item()\u65b9\u6cd5\uff0c\u800c\u4e0d\u662f\u672c\u65b9\u6cd5\u3002\u793a\u4f8b: ``` arr = np.array(['2022-09-08', '2022-09-09'], dtype='datetime64[s]') arr.item(0) # output is datetime.datetime(2022, 9, 8, 0, 0) arr[1].item() # output is datetime.datetime(2022, 9, 9, 0, 0) ``` Args: tm : the input numpy datetime object Returns: python datetime object \"\"\" unix_epoch = np . datetime64 ( 0 , \"s\" ) one_second = np . timedelta64 ( 1 , \"s\" ) seconds_since_epoch = ( tm - unix_epoch ) / one_second return datetime . datetime . utcfromtimestamp ( seconds_since_epoch ) top_n_argpos ( ts , n ) \u00b6 get top n (max->min) elements and return argpos which its value ordered in descent Examples: >>> top_n_argpos ([ np . nan , 4 , 3 , 9 , 8 , 5 , 2 , 1 , 0 , 6 , 7 ], 2 ) array ([ 3 , 4 ]) Parameters: Name Type Description Default ts np.array [description] required n int [description] required Returns: Type Description np.array [description] Source code in omicron/extensions/np.py def top_n_argpos ( ts : np . array , n : int ) -> np . array : \"\"\"get top n (max->min) elements and return argpos which its value ordered in descent Example: >>> top_n_argpos([np.nan, 4, 3, 9, 8, 5, 2, 1, 0, 6, 7], 2) array([3, 4]) Args: ts (np.array): [description] n (int): [description] Returns: np.array: [description] \"\"\" ts_ = np . copy ( ts ) ts_ [ np . isnan ( ts_ )] = - np . inf return np . 
argsort ( ts_ )[ - n :][:: - 1 ] Notify package \u00b6 dingtalk \u00b6 DingTalkMessage \u00b6 \u9489\u9489\u7684\u673a\u5668\u4eba\u6d88\u606f\u63a8\u9001\u7c7b\uff0c\u5c01\u88c5\u4e86\u5e38\u7528\u7684\u6d88\u606f\u7c7b\u578b\u4ee5\u53ca\u52a0\u5bc6\u7b97\u6cd5 \u9700\u8981\u5728\u914d\u7f6e\u6587\u4ef6\u4e2d\u914d\u7f6e\u9489\u9489\u7684\u673a\u5668\u4eba\u7684access_token \u5982\u679c\u914d\u7f6e\u4e86\u52a0\u7b7e\uff0c\u9700\u8981\u5728\u914d\u7f6e\u6587\u4ef6\u4e2d\u914d\u7f6e\u9489\u9489\u7684\u673a\u5668\u4eba\u7684secret \u5982\u679c\u914d\u7f6e\u4e86\u81ea\u5b9a\u4e49\u5173\u952e\u8bcd\uff0c\u9700\u8981\u5728\u914d\u7f6e\u6587\u4ef6\u4e2d\u914d\u7f6e\u9489\u9489\u7684\u673a\u5668\u4eba\u7684keyword\uff0c\u591a\u4e2a\u5173\u952e\u8bcd\u7528\u82f1\u6587\u9017\u53f7\u5206\u9694 \u5168\u90e8\u7684\u914d\u7f6e\u6587\u4ef6\u793a\u4f8b\u5982\u4e0b, \u5176\u4e2dsecret\u548ckeyword\u53ef\u4ee5\u4e0d\u914d\u7f6e, access_token\u5fc5\u987b\u914d\u7f6e notify: dingtalk_access_token: xxxx dingtalk_secret: xxxx Source code in omicron/notify/dingtalk.py class DingTalkMessage : \"\"\" \u9489\u9489\u7684\u673a\u5668\u4eba\u6d88\u606f\u63a8\u9001\u7c7b\uff0c\u5c01\u88c5\u4e86\u5e38\u7528\u7684\u6d88\u606f\u7c7b\u578b\u4ee5\u53ca\u52a0\u5bc6\u7b97\u6cd5 \u9700\u8981\u5728\u914d\u7f6e\u6587\u4ef6\u4e2d\u914d\u7f6e\u9489\u9489\u7684\u673a\u5668\u4eba\u7684access_token \u5982\u679c\u914d\u7f6e\u4e86\u52a0\u7b7e\uff0c\u9700\u8981\u5728\u914d\u7f6e\u6587\u4ef6\u4e2d\u914d\u7f6e\u9489\u9489\u7684\u673a\u5668\u4eba\u7684secret \u5982\u679c\u914d\u7f6e\u4e86\u81ea\u5b9a\u4e49\u5173\u952e\u8bcd\uff0c\u9700\u8981\u5728\u914d\u7f6e\u6587\u4ef6\u4e2d\u914d\u7f6e\u9489\u9489\u7684\u673a\u5668\u4eba\u7684keyword\uff0c\u591a\u4e2a\u5173\u952e\u8bcd\u7528\u82f1\u6587\u9017\u53f7\u5206\u9694 \u5168\u90e8\u7684\u914d\u7f6e\u6587\u4ef6\u793a\u4f8b\u5982\u4e0b, \u5176\u4e2dsecret\u548ckeyword\u53ef\u4ee5\u4e0d\u914d\u7f6e, access_token\u5fc5\u987b\u914d\u7f6e notify: dingtalk_access_token: xxxx dingtalk_secret: xxxx \"\"\" url = \"https://oapi.dingtalk.com/robot/send\" @classmethod def _get_access_token ( cls ): \"\"\"\u83b7\u53d6\u9489\u9489\u673a\u5668\u4eba\u7684access_token\"\"\" if hasattr ( cfg . notify , \"dingtalk_access_token\" ): return cfg . notify . dingtalk_access_token else : logger . error ( \"Dingtalk not configured, please add the following items: \\n \" \"notify: \\n \" \" dingtalk_access_token: xxxx \\n \" \" dingtalk_secret: xxxx \\n \" ) raise ConfigError ( \"dingtalk_access_token not found\" ) @classmethod def _get_secret ( cls ): \"\"\"\u83b7\u53d6\u9489\u9489\u673a\u5668\u4eba\u7684secret\"\"\" if hasattr ( cfg . notify , \"dingtalk_secret\" ): return cfg . notify . dingtalk_secret else : return None @classmethod def _get_url ( cls ): \"\"\"\u83b7\u53d6\u9489\u9489\u673a\u5668\u4eba\u7684\u6d88\u606f\u63a8\u9001\u5730\u5740\uff0c\u5c06\u7b7e\u540d\u548c\u65f6\u95f4\u6233\u62fc\u63a5\u5728url\u540e\u9762\"\"\" access_token = cls . _get_access_token () url = f \" { cls . url } ?access_token= { access_token } \" secret = cls . _get_secret () if secret : timestamp , sign = cls . _get_sign ( secret ) url = f \" { url } ×tamp= { timestamp } &sign= { sign } \" return url @classmethod def _get_sign ( cls , secret : str ): \"\"\"\u83b7\u53d6\u7b7e\u540d\u53d1\u9001\u7ed9\u9489\u9489\u673a\u5668\u4eba\"\"\" timestamp = str ( round ( time . time () * 1000 )) secret_enc = secret . encode ( \"utf-8\" ) string_to_sign = \" {} \\n {} \" . format ( timestamp , secret ) string_to_sign_enc = string_to_sign . 
encode ( \"utf-8\" ) hmac_code = hmac . new ( secret_enc , string_to_sign_enc , digestmod = hashlib . sha256 ) . digest () sign = urllib . parse . quote_plus ( base64 . b64encode ( hmac_code )) return timestamp , sign @classmethod def _send ( cls , msg ): \"\"\"\u53d1\u9001\u6d88\u606f\u5230\u9489\u9489\u673a\u5668\u4eba\"\"\" url = cls . _get_url () response = httpx . post ( url , json = msg , timeout = 30 ) if response . status_code != 200 : logger . error ( f \"failed to send message, content: { msg } , response from Dingtalk: { response . content . decode () } \" ) return rsp = json . loads ( response . content ) if rsp . get ( \"errcode\" ) != 0 : logger . error ( f \"failed to send message, content: { msg } , response from Dingtalk: { rsp } \" ) return response . content . decode () @classmethod async def _send_async ( cls , msg ): \"\"\"\u53d1\u9001\u6d88\u606f\u5230\u9489\u9489\u673a\u5668\u4eba\"\"\" url = cls . _get_url () async with httpx . AsyncClient () as client : r = await client . post ( url , json = msg , timeout = 30 ) if r . status_code != 200 : logger . error ( f \"failed to send message, content: { msg } , response from Dingtalk: { r . content . decode () } \" ) return rsp = json . loads ( r . content ) if rsp . get ( \"errcode\" ) != 0 : logger . error ( f \"failed to send message, content: { msg } , response from Dingtalk: { rsp } \" ) return r . content . decode () @classmethod @deprecated ( \"2.0.0\" , details = \"use function `ding` instead\" ) def text ( cls , content ): msg = { \"text\" : { \"content\" : content }, \"msgtype\" : \"text\" } return cls . _send ( msg ) text ( cls , content ) classmethod \u00b6 .. deprecated:: 2.0.0 use function ding instead Source code in omicron/notify/dingtalk.py @classmethod @deprecated ( \"2.0.0\" , details = \"use function `ding` instead\" ) def text ( cls , content ): msg = { \"text\" : { \"content\" : content }, \"msgtype\" : \"text\" } return cls . 
_send ( msg ) ding ( msg ) \u00b6 \u53d1\u9001\u6d88\u606f\u5230\u9489\u9489\u673a\u5668\u4eba \u652f\u6301\u53d1\u9001\u7eaf\u6587\u672c\u6d88\u606f\u548cmarkdown\u683c\u5f0f\u7684\u6587\u672c\u6d88\u606f\u3002\u5982\u679c\u8981\u53d1\u9001markdown\u683c\u5f0f\u7684\u6d88\u606f\uff0c\u8bf7\u901a\u8fc7\u5b57\u5178\u4f20\u5165\uff0c\u5fc5\u987b\u5305\u542b\u5305\u542b\"title\"\u548c\"text\"\u4e24\u4e2a\u5b57\u6bb5\u3002\u66f4\u8be6\u7ec6\u4fe1\u606f\uff0c\u8bf7\u89c1 \u9489\u9489\u5f00\u653e\u5e73\u53f0\u6587\u6863 Important \u5fc5\u987b\u5728\u5f02\u6b65\u7ebf\u7a0b(\u5373\u8fd0\u884casyncio loop\u7684\u7ebf\u7a0b\uff09\u4e2d\u8c03\u7528\u6b64\u65b9\u6cd5\uff0c\u5426\u5219\u4f1a\u629b\u51fa\u5f02\u5e38\u3002 \u6b64\u65b9\u6cd5\u8fd4\u56de\u4e00\u4e2aAwaitable\uff0c\u60a8\u53ef\u4ee5\u7b49\u5f85\u5b83\u5b8c\u6210\uff0c\u4e5f\u53ef\u4ee5\u5ffd\u7565\u8fd4\u56de\u503c\uff0c\u6b64\u65f6\u5b83\u5c06\u4f5c\u4e3a\u4e00\u4e2a\u540e\u53f0\u4efb\u52a1\u6267\u884c\uff0c\u4f46\u5b8c\u6210\u7684\u65f6\u95f4\u4e0d\u786e\u5b9a\u3002 Parameters: Name Type Description Default msg Union[str, dict] \u5f85\u53d1\u9001\u6d88\u606f\u3002 required Returns: Type Description Awaitable \u53d1\u9001\u6d88\u606f\u7684\u540e\u53f0\u4efb\u52a1\u3002\u60a8\u53ef\u4ee5\u4f7f\u7528\u6b64\u8fd4\u56de\u53e5\u67c4\u6765\u53d6\u6d88\u4efb\u52a1\u3002 Source code in omicron/notify/dingtalk.py def ding ( msg : Union [ str , dict ]) -> Awaitable : \"\"\"\u53d1\u9001\u6d88\u606f\u5230\u9489\u9489\u673a\u5668\u4eba \u652f\u6301\u53d1\u9001\u7eaf\u6587\u672c\u6d88\u606f\u548cmarkdown\u683c\u5f0f\u7684\u6587\u672c\u6d88\u606f\u3002\u5982\u679c\u8981\u53d1\u9001markdown\u683c\u5f0f\u7684\u6d88\u606f\uff0c\u8bf7\u901a\u8fc7\u5b57\u5178\u4f20\u5165\uff0c\u5fc5\u987b\u5305\u542b\u5305\u542b\"title\"\u548c\"text\"\u4e24\u4e2a\u5b57\u6bb5\u3002\u66f4\u8be6\u7ec6\u4fe1\u606f\uff0c\u8bf7\u89c1[\u9489\u9489\u5f00\u653e\u5e73\u53f0\u6587\u6863](https://open.dingtalk.com/document/orgapp-server/message-type) ???+ Important \u5fc5\u987b\u5728\u5f02\u6b65\u7ebf\u7a0b(\u5373\u8fd0\u884casyncio loop\u7684\u7ebf\u7a0b\uff09\u4e2d\u8c03\u7528\u6b64\u65b9\u6cd5\uff0c\u5426\u5219\u4f1a\u629b\u51fa\u5f02\u5e38\u3002 \u6b64\u65b9\u6cd5\u8fd4\u56de\u4e00\u4e2aAwaitable\uff0c\u60a8\u53ef\u4ee5\u7b49\u5f85\u5b83\u5b8c\u6210\uff0c\u4e5f\u53ef\u4ee5\u5ffd\u7565\u8fd4\u56de\u503c\uff0c\u6b64\u65f6\u5b83\u5c06\u4f5c\u4e3a\u4e00\u4e2a\u540e\u53f0\u4efb\u52a1\u6267\u884c\uff0c\u4f46\u5b8c\u6210\u7684\u65f6\u95f4\u4e0d\u786e\u5b9a\u3002 Args: msg: \u5f85\u53d1\u9001\u6d88\u606f\u3002 Returns: \u53d1\u9001\u6d88\u606f\u7684\u540e\u53f0\u4efb\u52a1\u3002\u60a8\u53ef\u4ee5\u4f7f\u7528\u6b64\u8fd4\u56de\u53e5\u67c4\u6765\u53d6\u6d88\u4efb\u52a1\u3002 \"\"\" if isinstance ( msg , str ): msg_ = { \"text\" : { \"content\" : msg }, \"msgtype\" : \"text\" } elif isinstance ( msg , dict ): msg_ = { \"msgtype\" : \"markdown\" , \"markdown\" : { \"title\" : msg [ \"title\" ], \"text\" : msg [ \"text\" ]}, } else : raise TypeError task = asyncio . create_task ( DingTalkMessage . _send_async ( msg_ )) return task mail \u00b6 compose ( subject , plain_txt = None , html = None , attachment = None ) \u00b6 \u7f16\u5199MIME\u90ae\u4ef6\u3002 Parameters: Name Type Description Default subject str \u90ae\u4ef6\u4e3b\u9898 required plain_txt str \u7eaf\u6587\u672c\u683c\u5f0f\u7684\u90ae\u4ef6\u5185\u5bb9 None html str html\u683c\u5f0f\u7684\u90ae\u4ef6\u5185\u5bb9. Defaults to None. 
None attachment str \u9644\u4ef6\u6587\u4ef6\u540d None Returns: Type Description EmailMessage MIME mail Source code in omicron/notify/mail.py def compose ( subject : str , plain_txt : str = None , html : str = None , attachment : str = None ) -> EmailMessage : \"\"\"\u7f16\u5199MIME\u90ae\u4ef6\u3002 Args: subject (str): \u90ae\u4ef6\u4e3b\u9898 plain_txt (str): \u7eaf\u6587\u672c\u683c\u5f0f\u7684\u90ae\u4ef6\u5185\u5bb9 html (str, optional): html\u683c\u5f0f\u7684\u90ae\u4ef6\u5185\u5bb9. Defaults to None. attachment (str, optional): \u9644\u4ef6\u6587\u4ef6\u540d Returns: MIME mail \"\"\" msg = EmailMessage () msg [ \"Subject\" ] = subject if html : msg . preamble = plain_txt or \"\" msg . set_content ( html , subtype = \"html\" ) else : assert plain_txt , \"Either plain_txt or html is required.\" msg . set_content ( plain_txt ) if attachment : ctype , encoding = mimetypes . guess_type ( attachment ) if ctype is None or encoding is not None : ctype = \"application/octet-stream\" maintype , subtype = ctype . split ( \"/\" , 1 ) with open ( attachment , \"rb\" ) as f : msg . add_attachment ( f . read (), maintype = maintype , subtype = subtype , filename = attachment ) return msg mail_notify ( subject = None , body = None , msg = None , html = False , receivers = None ) \u00b6 \u53d1\u9001\u90ae\u4ef6\u901a\u77e5\u3002 \u53d1\u9001\u8005\u3001\u63a5\u6536\u8005\u53ca\u90ae\u4ef6\u670d\u52a1\u5668\u7b49\u914d\u7f6e\u8bf7\u901a\u8fc7cfg4py\u914d\u7f6e\uff1a 1 2 3 4 5 notify: mail_from: aaron_yang@jieyu.ai mail_to: - code@jieyu.ai mail_server: smtp.ym.163.com \u9a8c\u8bc1\u5bc6\u7801\u8bf7\u901a\u8fc7\u73af\u5883\u53d8\u91cf MAIL_PASSWORD \u6765\u914d\u7f6e\u3002 subject/body\u4e0emsg\u5fc5\u987b\u63d0\u4f9b\u5176\u4e00\u3002 Important \u5fc5\u987b\u5728\u5f02\u6b65\u7ebf\u7a0b(\u5373\u8fd0\u884casyncio loop\u7684\u7ebf\u7a0b\uff09\u4e2d\u8c03\u7528\u6b64\u65b9\u6cd5\uff0c\u5426\u5219\u4f1a\u629b\u51fa\u5f02\u5e38\u3002 \u6b64\u65b9\u6cd5\u8fd4\u56de\u4e00\u4e2aAwaitable\uff0c\u60a8\u53ef\u4ee5\u7b49\u5f85\u5b83\u5b8c\u6210\uff0c\u4e5f\u53ef\u4ee5\u5ffd\u7565\u8fd4\u56de\u503c\uff0c\u6b64\u65f6\u5b83\u5c06\u4f5c\u4e3a\u4e00\u4e2a\u540e\u53f0\u4efb\u52a1\u6267\u884c\uff0c\u4f46\u5b8c\u6210\u7684\u65f6\u95f4\u4e0d\u786e\u5b9a\u3002 Parameters: Name Type Description Default msg EmailMessage [description]. Defaults to None. None subject str [description]. Defaults to None. None body str [description]. Defaults to None. None html bool body\u662f\u5426\u6309html\u683c\u5f0f\u5904\u7406\uff1f Defaults to False. 
False receivers List[str], Optional \u63a5\u6536\u8005\u4fe1\u606f\u3002\u5982\u679c\u4e0d\u63d0\u4f9b\uff0c\u5c06\u4f7f\u7528\u9884\u5148\u914d\u7f6e\u7684\u63a5\u6536\u8005\u4fe1\u606f\u3002 None Returns: Type Description Awaitable \u53d1\u9001\u6d88\u606f\u7684\u540e\u53f0\u4efb\u52a1\u3002\u60a8\u53ef\u4ee5\u4f7f\u7528\u6b64\u8fd4\u56de\u53e5\u67c4\u6765\u53d6\u6d88\u4efb\u52a1\u3002 Source code in omicron/notify/mail.py def mail_notify ( subject : str = None , body : str = None , msg : EmailMessage = None , html = False , receivers = None , ) -> Awaitable : \"\"\"\u53d1\u9001\u90ae\u4ef6\u901a\u77e5\u3002 \u53d1\u9001\u8005\u3001\u63a5\u6536\u8005\u53ca\u90ae\u4ef6\u670d\u52a1\u5668\u7b49\u914d\u7f6e\u8bf7\u901a\u8fc7cfg4py\u914d\u7f6e\uff1a ``` notify: mail_from: aaron_yang@jieyu.ai mail_to: - code@jieyu.ai mail_server: smtp.ym.163.com ``` \u9a8c\u8bc1\u5bc6\u7801\u8bf7\u901a\u8fc7\u73af\u5883\u53d8\u91cf`MAIL_PASSWORD`\u6765\u914d\u7f6e\u3002 subject/body\u4e0emsg\u5fc5\u987b\u63d0\u4f9b\u5176\u4e00\u3002 ???+ Important \u5fc5\u987b\u5728\u5f02\u6b65\u7ebf\u7a0b(\u5373\u8fd0\u884casyncio loop\u7684\u7ebf\u7a0b\uff09\u4e2d\u8c03\u7528\u6b64\u65b9\u6cd5\uff0c\u5426\u5219\u4f1a\u629b\u51fa\u5f02\u5e38\u3002 \u6b64\u65b9\u6cd5\u8fd4\u56de\u4e00\u4e2aAwaitable\uff0c\u60a8\u53ef\u4ee5\u7b49\u5f85\u5b83\u5b8c\u6210\uff0c\u4e5f\u53ef\u4ee5\u5ffd\u7565\u8fd4\u56de\u503c\uff0c\u6b64\u65f6\u5b83\u5c06\u4f5c\u4e3a\u4e00\u4e2a\u540e\u53f0\u4efb\u52a1\u6267\u884c\uff0c\u4f46\u5b8c\u6210\u7684\u65f6\u95f4\u4e0d\u786e\u5b9a\u3002 Args: msg (EmailMessage, optional): [description]. Defaults to None. subject (str, optional): [description]. Defaults to None. body (str, optional): [description]. Defaults to None. html (bool, optional): body\u662f\u5426\u6309html\u683c\u5f0f\u5904\u7406\uff1f Defaults to False. receivers (List[str], Optional): \u63a5\u6536\u8005\u4fe1\u606f\u3002\u5982\u679c\u4e0d\u63d0\u4f9b\uff0c\u5c06\u4f7f\u7528\u9884\u5148\u914d\u7f6e\u7684\u63a5\u6536\u8005\u4fe1\u606f\u3002 Returns: \u53d1\u9001\u6d88\u606f\u7684\u540e\u53f0\u4efb\u52a1\u3002\u60a8\u53ef\u4ee5\u4f7f\u7528\u6b64\u8fd4\u56de\u53e5\u67c4\u6765\u53d6\u6d88\u4efb\u52a1\u3002 \"\"\" if all ([ msg is not None , subject or body ]): raise TypeError ( \"msg\u53c2\u6570\u4e0esubject/body\u53ea\u80fd\u63d0\u4f9b\u5176\u4e2d\u4e4b\u4e00\" ) elif all ([ msg is None , subject is None , body is None ]): raise TypeError ( \"\u5fc5\u987b\u63d0\u4f9bmsg\u53c2\u6570\u6216\u8005subjecdt/body\u53c2\u6570\" ) if msg is None : if html : msg = compose ( subject , html = body ) else : msg = compose ( subject , plain_txt = body ) cfg = cfg4py . get_instance () if not receivers : receivers = cfg . notify . mail_to password = os . environ . get ( \"MAIL_PASSWORD\" ) return send_mail ( cfg . notify . mail_from , receivers , password , msg , host = cfg . notify . 
mail_server ) send_mail ( sender , receivers , password , msg = None , host = None , port = 25 , cc = None , bcc = None , subject = None , body = None , username = None ) \u00b6 \u53d1\u9001\u90ae\u4ef6\u901a\u77e5\u3002 \u5982\u679c\u53ea\u53d1\u9001\u7b80\u5355\u7684\u6587\u672c\u90ae\u4ef6\uff0c\u8bf7\u4f7f\u7528 send_mail(sender, receivers, subject=subject, plain=plain)\u3002\u5982\u679c\u8981\u53d1\u9001\u8f83\u590d\u6742\u7684\u5e26html\u548c\u9644\u4ef6\u7684\u90ae\u4ef6\uff0c\u8bf7\u5148\u8c03\u7528compose()\u751f\u6210\u4e00\u4e2aEmailMessage,\u7136\u540e\u518d\u8c03\u7528send_mail(sender, receivers, msg)\u6765\u53d1\u9001\u90ae\u4ef6\u3002 Important \u5fc5\u987b\u5728\u5f02\u6b65\u7ebf\u7a0b(\u5373\u8fd0\u884casyncio loop\u7684\u7ebf\u7a0b\uff09\u4e2d\u8c03\u7528\u6b64\u65b9\u6cd5\uff0c\u5426\u5219\u4f1a\u629b\u51fa\u5f02\u5e38\u3002 \u6b64\u65b9\u6cd5\u8fd4\u56de\u4e00\u4e2aAwaitable\uff0c\u60a8\u53ef\u4ee5\u7b49\u5f85\u5b83\u5b8c\u6210\uff0c\u4e5f\u53ef\u4ee5\u5ffd\u7565\u8fd4\u56de\u503c\uff0c\u6b64\u65f6\u5b83\u5c06\u4f5c\u4e3a\u4e00\u4e2a\u540e\u53f0\u4efb\u52a1\u6267\u884c\uff0c\u4f46\u5b8c\u6210\u7684\u65f6\u95f4\u4e0d\u786e\u5b9a\u3002 Parameters: Name Type Description Default sender str [description] required receivers List[str] [description] required msg EmailMessage [description]. Defaults to None. None host str [description]. Defaults to None. None port int [description]. Defaults to 25. 25 cc List[str] [description]. Defaults to None. None bcc List[str] [description]. Defaults to None. None subject str [description]. Defaults to None. None plain str [description]. Defaults to None. required username str the username used to logon to mail server. if not provided, then sender is used. None Returns: Type Description Awaitable \u53d1\u9001\u6d88\u606f\u7684\u540e\u53f0\u4efb\u52a1\u3002\u60a8\u53ef\u4ee5\u4f7f\u7528\u6b64\u8fd4\u56de\u53e5\u67c4\u6765\u53d6\u6d88\u4efb\u52a1\u3002 Source code in omicron/notify/mail.py @retry ( aiosmtplib . errors . SMTPConnectError , tries = 3 , backoff = 2 , delay = 30 , logger = logger ) def send_mail ( sender : str , receivers : List [ str ], password : str , msg : EmailMessage = None , host : str = None , port : int = 25 , cc : List [ str ] = None , bcc : List [ str ] = None , subject : str = None , body : str = None , username : str = None , ) -> Awaitable : \"\"\"\u53d1\u9001\u90ae\u4ef6\u901a\u77e5\u3002 \u5982\u679c\u53ea\u53d1\u9001\u7b80\u5355\u7684\u6587\u672c\u90ae\u4ef6\uff0c\u8bf7\u4f7f\u7528 send_mail(sender, receivers, subject=subject, plain=plain)\u3002\u5982\u679c\u8981\u53d1\u9001\u8f83\u590d\u6742\u7684\u5e26html\u548c\u9644\u4ef6\u7684\u90ae\u4ef6\uff0c\u8bf7\u5148\u8c03\u7528compose()\u751f\u6210\u4e00\u4e2aEmailMessage,\u7136\u540e\u518d\u8c03\u7528send_mail(sender, receivers, msg)\u6765\u53d1\u9001\u90ae\u4ef6\u3002 ???+ Important \u5fc5\u987b\u5728\u5f02\u6b65\u7ebf\u7a0b(\u5373\u8fd0\u884casyncio loop\u7684\u7ebf\u7a0b\uff09\u4e2d\u8c03\u7528\u6b64\u65b9\u6cd5\uff0c\u5426\u5219\u4f1a\u629b\u51fa\u5f02\u5e38\u3002 \u6b64\u65b9\u6cd5\u8fd4\u56de\u4e00\u4e2aAwaitable\uff0c\u60a8\u53ef\u4ee5\u7b49\u5f85\u5b83\u5b8c\u6210\uff0c\u4e5f\u53ef\u4ee5\u5ffd\u7565\u8fd4\u56de\u503c\uff0c\u6b64\u65f6\u5b83\u5c06\u4f5c\u4e3a\u4e00\u4e2a\u540e\u53f0\u4efb\u52a1\u6267\u884c\uff0c\u4f46\u5b8c\u6210\u7684\u65f6\u95f4\u4e0d\u786e\u5b9a\u3002 Args: sender (str): [description] receivers (List[str]): [description] msg (EmailMessage, optional): [description]. Defaults to None. host (str, optional): [description]. Defaults to None. 
port (int, optional): [description]. Defaults to 25. cc (List[str], optional): [description]. Defaults to None. bcc (List[str], optional): [description]. Defaults to None. subject (str, optional): [description]. Defaults to None. plain (str, optional): [description]. Defaults to None. username (str, optional): the username used to logon to mail server. if not provided, then `sender` is used. Returns: \u53d1\u9001\u6d88\u606f\u7684\u540e\u53f0\u4efb\u52a1\u3002\u60a8\u53ef\u4ee5\u4f7f\u7528\u6b64\u8fd4\u56de\u53e5\u67c4\u6765\u53d6\u6d88\u4efb\u52a1\u3002 \"\"\" if all ([ msg is not None , subject is not None or body is not None ]): raise TypeError ( \"msg\u53c2\u6570\u4e0esubject/body\u53ea\u80fd\u63d0\u4f9b\u5176\u4e2d\u4e4b\u4e00\" ) elif all ([ msg is None , subject is None , body is None ]): raise TypeError ( \"\u5fc5\u987b\u63d0\u4f9bmsg\u53c2\u6570\u6216\u8005subjecdt/body\u53c2\u6570\" ) msg = msg or EmailMessage () if isinstance ( receivers , str ): receivers = [ receivers ] msg [ \"From\" ] = sender msg [ \"To\" ] = \", \" . join ( receivers ) if subject : msg [ \"subject\" ] = subject if body : msg . set_content ( body ) if cc : msg [ \"Cc\" ] = \", \" . join ( cc ) if bcc : msg [ \"Bcc\" ] = \", \" . join ( bcc ) username = username or sender if host is None : host = sender . split ( \"@\" )[ - 1 ] task = asyncio . create_task ( aiosmtplib . send ( msg , hostname = host , port = port , username = sender , password = password ) ) return task Backtesting Log Facility \u00b6 Info Since 2.0.0.a76 \u56de\u6d4b\u65f6\uff0c\u6253\u5370\u65f6\u95f4\u4e00\u822c\u8981\u6c42\u4e3a\u56de\u6d4b\u5f53\u65f6\u7684\u65f6\u95f4\uff0c\u800c\u975e\u7cfb\u7edf\u65f6\u95f4\u3002\u8fd9\u4e2a\u6a21\u5757\u63d0\u4f9b\u4e86\u6539\u5199\u65e5\u5fd7\u65f6\u95f4\u7684\u529f\u80fd\u3002 \u4f7f\u7528\u65b9\u6cd5\uff1a 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 from omicron.core.backtestlog import BacktestLogger logger = BacktestLogger . getLogger ( __name__ ) logger . setLevel ( logging . INFO ) handler = logging . StreamHandler () # \u901a\u8fc7bt_date\u57df\u6765\u8bbe\u7f6e\u65e5\u671f\uff0c\u800c\u4e0d\u662fasctime handler . setFormatter ( Formatter ( \" %(bt_date)s %(message)s \" )) logging . basicConfig ( level = logging . INFO , handlers = [ handler ]) # \u8c03\u7528\u65f6\u4e0e\u666e\u901a\u65e5\u5fd7\u4e00\u6837\uff0c\u4f46\u8981\u589e\u52a0\u4e00\u4e2adate\u53c2\u6570 logger . info ( \"this is info\" , date = datetime . date ( 2022 , 3 , 1 )) \u4e0a\u8ff0\u4ee3\u7801\u5c06\u8f93\u51fa\uff1a 1 2022-03-01 this is info \u4f7f\u7528\u672c\u65e5\u5fd7\u7684\u6838\u5fc3\u662f\u4e0a\u8ff0\u4ee3\u7801\u4e2d\u7684\u7b2c3\u884c\u548c\u7b2c9\u884c\uff0c\u6700\u540e\uff0c\u5728\u8f93\u51fa\u65e5\u5fd7\u65f6\u52a0\u4e0a date=... 
\uff0c\u5982\u7b2c15\u884c\u6240\u793a\u3002 \u6ce8\u610f\u5728\u7b2c9\u884c\uff0c\u901a\u5e38\u662f logging.getLogger(__nam__) \uff0c\u800c\u8fd9\u91cc\u662f BacktestLogger.getLogger(__name__) \u5982\u679c\u4e0a\u8ff0\u8c03\u7528\u4e2d\u6ca1\u6709\u4f20\u5165 date \uff0c\u5219\u5c06\u4f7f\u7528\u8c03\u7528\u65f6\u95f4\uff0c\u6b64\u65f6\u884c\u4e3a\u8ddf\u539f\u65e5\u5fd7\u7cfb\u7edf\u4e00\u81f4\u3002 Warning \u5f53\u8c03\u7528logger.exception\u65f6\uff0c\u4e0d\u80fd\u4f20\u5165date\u53c2\u6570\u3002 \u914d\u7f6e\u6587\u4ef6\u793a\u4f8b \u00b6 \u5982\u679c\u8981\u901a\u8fc7\u914d\u7f6e\u6587\u4ef6\u6765\u914d\u7f6e\uff0c\u53ef\u4f7f\u7528\u4ee5\u4e0b\u793a\u4f8b\uff1a 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 formatters : backtest : format : '%(bt_date)s | %(message)s' handlers : backtest : class : logging.StreamHandler formatter : backtest omicron.base.strategy : level : INFO handlers : [ backtest ] propagate : false loggers : omicron.base.strategy : level : INFO handlers : [ backtest ] propagate : false","title":"omicron"},{"location":"api/omicron/#omicron.close","text":"\u5173\u95ed\u4e0e\u7f13\u5b58\u7684\u8fde\u63a5 Source code in omicron/__init__.py async def close (): \"\"\"\u5173\u95ed\u4e0e\u7f13\u5b58\u7684\u8fde\u63a5\"\"\" try : await cache . close () except Exception as e : # noqa pass","title":"close()"},{"location":"api/omicron/#omicron.init","text":"\u521d\u59cb\u5316Omicron \u521d\u59cb\u5316influxDB, \u7f13\u5b58\u7b49\u8fde\u63a5\uff0c \u5e76\u52a0\u8f7d\u65e5\u5386\u548c\u8bc1\u5238\u5217\u8868 \u4e0a\u8ff0\u521d\u59cb\u5316\u7684\u8fde\u63a5\uff0c\u5e94\u8be5\u5728\u7a0b\u5e8f\u9000\u51fa\u65f6\uff0c\u901a\u8fc7\u8c03\u7528 close() \u5173\u95ed Source code in omicron/__init__.py async def init ( app_cache : int = 5 ): \"\"\"\u521d\u59cb\u5316Omicron \u521d\u59cb\u5316influxDB, \u7f13\u5b58\u7b49\u8fde\u63a5\uff0c \u5e76\u52a0\u8f7d\u65e5\u5386\u548c\u8bc1\u5238\u5217\u8868 \u4e0a\u8ff0\u521d\u59cb\u5316\u7684\u8fde\u63a5\uff0c\u5e94\u8be5\u5728\u7a0b\u5e8f\u9000\u51fa\u65f6\uff0c\u901a\u8fc7\u8c03\u7528`close()`\u5173\u95ed \"\"\" global cache await cache . init ( app = app_cache ) await tf . init () from omicron.models.security import Security await Security . 
init ()","title":"init()"},{"location":"api/omicron/#extensions-package","text":"","title":"Extensions package"},{"location":"api/omicron/#omicron.extensions.decimals","text":"","title":"decimals"},{"location":"api/omicron/#omicron.extensions.decimals.math_round","text":"\u7531\u4e8e\u6d6e\u70b9\u6570\u7684\u8868\u793a\u95ee\u9898\uff0c\u5f88\u591a\u8bed\u8a00\u7684round\u51fd\u6570\u4e0e\u6570\u5b66\u4e0a\u7684round\u51fd\u6570\u4e0d\u4e00\u81f4\u3002\u4e0b\u9762\u7684\u51fd\u6570\u7ed3\u679c\u4e0e\u6570\u5b66\u4e0a\u7684\u4e00\u81f4\u3002 Parameters: Name Type Description Default x float \u8981\u8fdb\u884c\u56db\u820d\u4e94\u5165\u7684\u6570\u5b57 required digits int \u5c0f\u6570\u70b9\u540e\u4fdd\u7559\u7684\u4f4d\u6570 required Source code in omicron/extensions/decimals.py def math_round ( x : float , digits : int ): \"\"\"\u7531\u4e8e\u6d6e\u70b9\u6570\u7684\u8868\u793a\u95ee\u9898\uff0c\u5f88\u591a\u8bed\u8a00\u7684round\u51fd\u6570\u4e0e\u6570\u5b66\u4e0a\u7684round\u51fd\u6570\u4e0d\u4e00\u81f4\u3002\u4e0b\u9762\u7684\u51fd\u6570\u7ed3\u679c\u4e0e\u6570\u5b66\u4e0a\u7684\u4e00\u81f4\u3002 Args: x: \u8981\u8fdb\u884c\u56db\u820d\u4e94\u5165\u7684\u6570\u5b57 digits: \u5c0f\u6570\u70b9\u540e\u4fdd\u7559\u7684\u4f4d\u6570 \"\"\" return int ( x * ( 10 ** digits ) + copysign ( 0.5 , x )) / ( 10 ** digits )","title":"math_round()"},{"location":"api/omicron/#omicron.extensions.decimals.price_equal","text":"\u5224\u65ad\u80a1\u4ef7\u662f\u5426\u76f8\u7b49 Parameters: Name Type Description Default x \u4ef7\u683c1 required y \u4ef7\u683c2 required Returns: Type Description bool \u5982\u679c\u76f8\u7b49\u5219\u8fd4\u56deTrue\uff0c\u5426\u5219\u8fd4\u56deFalse Source code in omicron/extensions/decimals.py def price_equal ( x : float , y : float ) -> bool : \"\"\"\u5224\u65ad\u80a1\u4ef7\u662f\u5426\u76f8\u7b49 Args: x : \u4ef7\u683c1 y : \u4ef7\u683c2 Returns: \u5982\u679c\u76f8\u7b49\u5219\u8fd4\u56deTrue\uff0c\u5426\u5219\u8fd4\u56deFalse \"\"\" return abs ( math_round ( x , 2 ) - math_round ( y , 2 )) < 1e-2","title":"price_equal()"},{"location":"api/omicron/#omicron.extensions.np","text":"Extension function related to numpy","title":"np"},{"location":"api/omicron/#omicron.extensions.np.array_math_round","text":"\u5c06\u4e00\u7ef4\u6570\u7ec4arr\u7684\u6570\u636e\u8fdb\u884c\u56db\u820d\u4e94\u5165 numpy.around\u7684\u51fd\u6570\u5e76\u4e0d\u662f\u6570\u5b66\u4e0a\u7684\u56db\u820d\u4e94\u5165\uff0c\u5bf91.5\u548c2.5\u8fdb\u884cround\u7684\u7ed3\u679c\u90fd\u4f1a\u53d8\u62102\uff0c\u5728\u91d1\u878d\u9886\u57df\u8ba1\u7b97\u4e2d\uff0c\u6211\u4eec\u5fc5\u987b\u4f7f\u7528\u6570\u5b66\u610f\u4e49\u4e0a\u7684\u56db\u820d\u4e94\u5165\u3002 Parameters: Name Type Description Default arr ArrayLike \u8f93\u5165\u6570\u7ec4 required digits int required Returns: Type Description np.ndarray \u56db\u820d\u4e94\u5165\u540e\u7684\u4e00\u7ef4\u6570\u7ec4 Source code in omicron/extensions/np.py def array_math_round ( arr : Union [ float , ArrayLike ], digits : int ) -> np . 
ndarray : \"\"\"\u5c06\u4e00\u7ef4\u6570\u7ec4arr\u7684\u6570\u636e\u8fdb\u884c\u56db\u820d\u4e94\u5165 numpy.around\u7684\u51fd\u6570\u5e76\u4e0d\u662f\u6570\u5b66\u4e0a\u7684\u56db\u820d\u4e94\u5165\uff0c\u5bf91.5\u548c2.5\u8fdb\u884cround\u7684\u7ed3\u679c\u90fd\u4f1a\u53d8\u62102\uff0c\u5728\u91d1\u878d\u9886\u57df\u8ba1\u7b97\u4e2d\uff0c\u6211\u4eec\u5fc5\u987b\u4f7f\u7528\u6570\u5b66\u610f\u4e49\u4e0a\u7684\u56db\u820d\u4e94\u5165\u3002 Args: arr (ArrayLike): \u8f93\u5165\u6570\u7ec4 digits (int): Returns: np.ndarray: \u56db\u820d\u4e94\u5165\u540e\u7684\u4e00\u7ef4\u6570\u7ec4 \"\"\" # \u5982\u679c\u662f\u5355\u4e2a\u5143\u7d20\uff0c\u5219\u76f4\u63a5\u8fd4\u56de if isinstance ( arr , float ): return decimals . math_round ( arr , digits ) f = np . vectorize ( lambda x : decimals . math_round ( x , digits )) return f ( arr )","title":"array_math_round()"},{"location":"api/omicron/#omicron.extensions.np.array_price_equal","text":"\u5224\u65ad\u4e24\u4e2a\u4ef7\u683c\u6570\u7ec4\u662f\u5426\u76f8\u7b49 Parameters: Name Type Description Default price1 ArrayLike \u4ef7\u683c\u6570\u7ec4 required price2 ArrayLike \u4ef7\u683c\u6570\u7ec4 required Returns: Type Description np.ndarray \u5224\u65ad\u7ed3\u679c Source code in omicron/extensions/np.py def array_price_equal ( price1 : ArrayLike , price2 : ArrayLike ) -> np . ndarray : \"\"\"\u5224\u65ad\u4e24\u4e2a\u4ef7\u683c\u6570\u7ec4\u662f\u5426\u76f8\u7b49 Args: price1 (ArrayLike): \u4ef7\u683c\u6570\u7ec4 price2 (ArrayLike): \u4ef7\u683c\u6570\u7ec4 Returns: np.ndarray: \u5224\u65ad\u7ed3\u679c \"\"\" price1 = array_math_round ( price1 , 2 ) price2 = array_math_round ( price2 , 2 ) return abs ( price1 - price2 ) < 1e-2","title":"array_price_equal()"},{"location":"api/omicron/#omicron.extensions.np.bars_since","text":"Return the number of bars since condition sequence was last True , or if never, return default . 1 2 3 >>> condition = [True, True, False] >>> bars_since(condition) 1 Source code in omicron/extensions/np.py def bars_since ( condition : Sequence [ bool ], default = None ) -> int : \"\"\" Return the number of bars since `condition` sequence was last `True`, or if never, return `default`. >>> condition = [True, True, False] >>> bars_since(condition) 1 \"\"\" return next ( compress ( range ( len ( condition )), reversed ( condition )), default )","title":"bars_since()"},{"location":"api/omicron/#omicron.extensions.np.bin_cut","text":"\u5c06\u6570\u7ec4arr\u5207\u5206\u6210n\u4efd todo: use padding + reshape to boost performance Parameters: Name Type Description Default arr [type] [description] required n [type] [description] required Returns: Type Description [type] [description] Source code in omicron/extensions/np.py def bin_cut ( arr : list , n : int ): \"\"\"\u5c06\u6570\u7ec4arr\u5207\u5206\u6210n\u4efd todo: use padding + reshape to boost performance Args: arr ([type]): [description] n ([type]): [description] Returns: [type]: [description] \"\"\" result = [[] for i in range ( n )] for i , e in enumerate ( arr ): result [ i % n ] . 
append ( e ) return [ e for e in result if len ( e )]","title":"bin_cut()"},{"location":"api/omicron/#omicron.extensions.np.count_between","text":"\u8ba1\u7b97\u6570\u7ec4\u4e2d\uff0c start \u5143\u7d20\u4e0e end \u5143\u7d20\u4e4b\u95f4\u5171\u6709\u591a\u5c11\u4e2a\u5143\u7d20 \u8981\u6c42arr\u5fc5\u987b\u662f\u5df2\u6392\u5e8f\u3002\u8ba1\u7b97\u7ed3\u679c\u4f1a\u5305\u542b\u533a\u95f4\u8fb9\u754c\u70b9\u3002 Examples: >>> arr = [ 20050104 , 20050105 , 20050106 , 20050107 , 20050110 , 20050111 ] >>> count_between ( arr , 20050104 , 20050111 ) 6 >>> count_between ( arr , 20050104 , 20050109 ) 4 Source code in omicron/extensions/np.py def count_between ( arr , start , end ): \"\"\"\u8ba1\u7b97\u6570\u7ec4\u4e2d\uff0c`start`\u5143\u7d20\u4e0e`end`\u5143\u7d20\u4e4b\u95f4\u5171\u6709\u591a\u5c11\u4e2a\u5143\u7d20 \u8981\u6c42arr\u5fc5\u987b\u662f\u5df2\u6392\u5e8f\u3002\u8ba1\u7b97\u7ed3\u679c\u4f1a\u5305\u542b\u533a\u95f4\u8fb9\u754c\u70b9\u3002 Examples: >>> arr = [20050104, 20050105, 20050106, 20050107, 20050110, 20050111] >>> count_between(arr, 20050104, 20050111) 6 >>> count_between(arr, 20050104, 20050109) 4 \"\"\" pos_start = np . searchsorted ( arr , start , side = \"right\" ) pos_end = np . searchsorted ( arr , end , side = \"right\" ) counter = pos_end - pos_start + 1 if start < arr [ 0 ]: counter -= 1 if end > arr [ - 1 ]: counter -= 1 return counter","title":"count_between()"},{"location":"api/omicron/#omicron.extensions.np.dataframe_to_structured_array","text":"convert dataframe (with all columns, and index possibly) to numpy structured arrays len(dtypes) should be either equal to len(df.columns) or len(df.columns) + 1 . In the later case, it implies to include df.index into converted array. Parameters: Name Type Description Default df DataFrame the one needs to be converted required dtypes List[Tuple] Defaults to None. If it's None , then dtypes of df is used, in such case, the index of df will not be converted. None Returns: Type Description ArrayLike [description] Source code in omicron/extensions/np.py def dataframe_to_structured_array ( df : DataFrame , dtypes : List [ Tuple ] = None ) -> ArrayLike : \"\"\"convert dataframe (with all columns, and index possibly) to numpy structured arrays `len(dtypes)` should be either equal to `len(df.columns)` or `len(df.columns) + 1`. In the later case, it implies to include `df.index` into converted array. Args: df: the one needs to be converted dtypes: Defaults to None. If it's `None`, then dtypes of `df` is used, in such case, the `index` of `df` will not be converted. Returns: ArrayLike: [description] \"\"\" v = df if dtypes is not None : dtypes_in_dict = { key : value for key , value in dtypes } col_len = len ( df . columns ) if len ( dtypes ) == col_len + 1 : v = df . reset_index () rename_index_to = set ( dtypes_in_dict . keys ()) . difference ( set ( df . columns )) v . rename ( columns = { \"index\" : list ( rename_index_to )[ 0 ]}, inplace = True ) elif col_len != len ( dtypes ): raise ValueError ( f \"length of dtypes should be either { col_len } or { col_len + 1 } , is { len ( dtypes ) } \" ) # re-arrange order of dtypes, in order to align with df.columns dtypes = [] for name in v . columns : dtypes . append (( name , dtypes_in_dict [ name ])) else : dtypes = df . dtypes return np . array ( np . rec . fromrecords ( v . 
values ), dtype = dtypes )","title":"dataframe_to_structured_array()"},{"location":"api/omicron/#omicron.extensions.np.dict_to_numpy_array","text":"convert dictionary to numpy array Examples: d = {\"aaron\": 5, \"jack\": 6} dtype = [(\"name\", \"S8\"), (\"score\", \" np . array : \"\"\"convert dictionary to numpy array Examples: >>> d = {\"aaron\": 5, \"jack\": 6} >>> dtype = [(\"name\", \"S8\"), (\"score\", \">> dict_to_numpy_array(d, dtype) array([(b'aaron', 5), (b'jack', 6)], dtype=[('name', 'S8'), ('score', '>> arr = np . arange ( 6 , dtype = np . float32 ) >>> arr [ 3 : 5 ] = np . NaN >>> fill_nan ( arr ) ... array ([ 0. , 1. , 2. , 2. , 2. , 5. ], dtype = float32 ) >>> arr = np . arange ( 6 , dtype = np . float32 ) >>> arr [ 0 : 2 ] = np . nan >>> fill_nan ( arr ) ... array ([ 2. , 2. , 2. , 3. , 4. , 5. ], dtype = float32 ) Parameters: Name Type Description Default ts np.array [description] required Source code in omicron/extensions/np.py def fill_nan ( ts : np . ndarray ): \"\"\"\u5c06ts\u4e2d\u7684NaN\u66ff\u6362\u4e3a\u5176\u524d\u503c \u5982\u679cts\u8d77\u5934\u7684\u5143\u7d20\u4e3aNaN\uff0c\u5219\u7528\u7b2c\u4e00\u4e2a\u975eNaN\u5143\u7d20\u66ff\u6362\u3002 \u5982\u679c\u6240\u6709\u5143\u7d20\u90fd\u4e3aNaN\uff0c\u5219\u65e0\u6cd5\u66ff\u6362\u3002 Example: >>> arr = np.arange(6, dtype=np.float32) >>> arr[3:5] = np.NaN >>> fill_nan(arr) ... # doctest: +NORMALIZE_WHITESPACE array([0., 1., 2., 2., 2., 5.], dtype=float32) >>> arr = np.arange(6, dtype=np.float32) >>> arr[0:2] = np.nan >>> fill_nan(arr) ... # doctest: +NORMALIZE_WHITESPACE array([2., 2., 2., 3., 4., 5.], dtype=float32) Args: ts (np.array): [description] \"\"\" if np . all ( np . isnan ( ts )): raise ValueError ( \"all of ts are NaN\" ) if ts [ 0 ] is None or math . isnan ( ts [ 0 ]): idx = np . argwhere ( ~ np . isnan ( ts ))[ 0 ] ts [ 0 ] = ts [ idx ] mask = np . isnan ( ts ) idx = np . where ( ~ mask , np . arange ( mask . size ), 0 ) np . maximum . accumulate ( idx , out = idx ) return ts [ idx ]","title":"fill_nan()"},{"location":"api/omicron/#omicron.extensions.np.find_runs","text":"Find runs of consecutive items in an array. Parameters: Name Type Description Default x ArrayLike the sequence to find runs in required Returns: Type Description Tuple[np.ndarray, np.ndarray, np.ndarray] A tuple of unique values, start indices, and length of runs Source code in omicron/extensions/np.py def find_runs ( x : ArrayLike ) -> Tuple [ np . ndarray , np . ndarray , np . ndarray ]: \"\"\"Find runs of consecutive items in an array. Args: x: the sequence to find runs in Returns: A tuple of unique values, start indices, and length of runs \"\"\" # ensure array x = np . asanyarray ( x ) if x . ndim != 1 : raise ValueError ( \"only 1D array supported\" ) n = x . shape [ 0 ] # handle empty array if n == 0 : return np . array ([]), np . array ([]), np . array ([]) else : # find run starts loc_run_start = np . empty ( n , dtype = bool ) loc_run_start [ 0 ] = True np . not_equal ( x [: - 1 ], x [ 1 :], out = loc_run_start [ 1 :]) run_starts = np . nonzero ( loc_run_start )[ 0 ] # find run values run_values = x [ loc_run_start ] # find run lengths run_lengths = np . diff ( np . 
append ( run_starts , n )) return run_values , run_starts , run_lengths","title":"find_runs()"},{"location":"api/omicron/#omicron.extensions.np.floor","text":"\u5728\u6570\u636earr\u4e2d\uff0c\u627e\u5230\u5c0f\u4e8e\u7b49\u4e8eitem\u7684\u90a3\u4e00\u4e2a\u503c\u3002\u5982\u679citem\u5c0f\u4e8e\u6240\u6709arr\u5143\u7d20\u7684\u503c\uff0c\u8fd4\u56dearr[0];\u5982\u679citem \u5927\u4e8e\u6240\u6709arr\u5143\u7d20\u7684\u503c\uff0c\u8fd4\u56dearr[-1] \u4e0e minute_frames_floor \u4e0d\u540c\u7684\u662f\uff0c\u672c\u51fd\u6570\u4e0d\u505a\u56de\u7ed5\u4e0e\u8fdb\u4f4d. Examples: >>> a = [ 3 , 6 , 9 ] >>> floor ( a , - 1 ) 3 >>> floor ( a , 9 ) 9 >>> floor ( a , 10 ) 9 >>> floor ( a , 4 ) 3 >>> floor ( a , 10 ) 9 Parameters: Name Type Description Default arr required item required Source code in omicron/extensions/np.py def floor ( arr , item ): \"\"\" \u5728\u6570\u636earr\u4e2d\uff0c\u627e\u5230\u5c0f\u4e8e\u7b49\u4e8eitem\u7684\u90a3\u4e00\u4e2a\u503c\u3002\u5982\u679citem\u5c0f\u4e8e\u6240\u6709arr\u5143\u7d20\u7684\u503c\uff0c\u8fd4\u56dearr[0];\u5982\u679citem \u5927\u4e8e\u6240\u6709arr\u5143\u7d20\u7684\u503c\uff0c\u8fd4\u56dearr[-1] \u4e0e`minute_frames_floor`\u4e0d\u540c\u7684\u662f\uff0c\u672c\u51fd\u6570\u4e0d\u505a\u56de\u7ed5\u4e0e\u8fdb\u4f4d. Examples: >>> a = [3, 6, 9] >>> floor(a, -1) 3 >>> floor(a, 9) 9 >>> floor(a, 10) 9 >>> floor(a, 4) 3 >>> floor(a,10) 9 Args: arr: item: Returns: \"\"\" if item < arr [ 0 ]: return arr [ 0 ] index = np . searchsorted ( arr , item , side = \"right\" ) return arr [ index - 1 ]","title":"floor()"},{"location":"api/omicron/#omicron.extensions.np.join_by_left","text":"\u5de6\u8fde\u63a5 r1 , r2 by key \u5982\u679c r1 \u4e2d\u5b58\u5728 r2 \u4e2d\u6ca1\u6709\u7684\u884c\uff0c\u5219\u8be5\u884c\u5bf9\u5e94\u7684 r2 \u4e2d\u7684\u90a3\u4e9b\u5b57\u6bb5\u5c06\u88abmask\uff0c\u6216\u8005\u586b\u5145\u968f\u673a\u6570\u3002 same as numpy.lib.recfunctions.join_by(key, r1, r2, jointype='leftouter'), but allows r1 have duplicate keys Reference: stackoverflow Examples: >>> # to join the following >>> # [[ 1, 2], >>> # [ 1, 3], x [[1, 5], >>> # [ 2, 3]] [4, 7]] >>> # only first two rows in left will be joined >>> r1 = np . array ([( 1 , 2 ), ( 1 , 3 ), ( 2 , 3 )], dtype = [( 'seq' , 'i4' ), ( 'score' , 'i4' )]) >>> r2 = np . array ([( 1 , 5 ), ( 4 , 7 )], dtype = [( 'seq' , 'i4' ), ( 'age' , 'i4' )]) >>> joined = join_by_left ( 'seq' , r1 , r2 ) >>> print ( joined ) [( 1 , 2 , 5 ) ( 1 , 3 , 5 ) ( 2 , 3 , -- )] >>> print ( joined . dtype ) ( numpy . record , [( 'seq' , '>> joined [ 2 ][ 2 ] masked >>> joined . 
tolist ()[ 2 ][ 2 ] == None True Parameters: Name Type Description Default key join\u5173\u952e\u5b57 required r1 \u6570\u636e\u96c61 required r2 \u6570\u636e\u96c62 required Returns: Type Description a numpy array Source code in omicron/extensions/np.py def join_by_left ( key , r1 , r2 , mask = True ): \"\"\"\u5de6\u8fde\u63a5 `r1`, `r2` by `key` \u5982\u679c`r1`\u4e2d\u5b58\u5728`r2`\u4e2d\u6ca1\u6709\u7684\u884c\uff0c\u5219\u8be5\u884c\u5bf9\u5e94\u7684`r2`\u4e2d\u7684\u90a3\u4e9b\u5b57\u6bb5\u5c06\u88abmask\uff0c\u6216\u8005\u586b\u5145\u968f\u673a\u6570\u3002 same as numpy.lib.recfunctions.join_by(key, r1, r2, jointype='leftouter'), but allows r1 have duplicate keys [Reference: stackoverflow](https://stackoverflow.com/a/53261882/13395693) Examples: >>> # to join the following >>> # [[ 1, 2], >>> # [ 1, 3], x [[1, 5], >>> # [ 2, 3]] [4, 7]] >>> # only first two rows in left will be joined >>> r1 = np.array([(1, 2), (1,3), (2,3)], dtype=[('seq', 'i4'), ('score', 'i4')]) >>> r2 = np.array([(1, 5), (4,7)], dtype=[('seq', 'i4'), ('age', 'i4')]) >>> joined = join_by_left('seq', r1, r2) >>> print(joined) [(1, 2, 5) (1, 3, 5) (2, 3, --)] >>> print(joined.dtype) (numpy.record, [('seq', '>> joined[2][2] masked >>> joined.tolist()[2][2] == None True Args: key : join\u5173\u952e\u5b57 r1 : \u6570\u636e\u96c61 r2 : \u6570\u636e\u96c62 Returns: a numpy array \"\"\" # figure out the dtype of the result array descr1 = r1 . dtype . descr descr2 = [ d for d in r2 . dtype . descr if d [ 0 ] not in r1 . dtype . names ] descrm = descr1 + descr2 # figure out the fields we'll need from each array f1 = [ d [ 0 ] for d in descr1 ] f2 = [ d [ 0 ] for d in descr2 ] # cache the number of columns in f1 ncol1 = len ( f1 ) # get a dict of the rows of r2 grouped by key rows2 = {} for row2 in r2 : rows2 . setdefault ( row2 [ key ], []) . append ( row2 ) # figure out how many rows will be in the result nrowm = 0 for k1 in r1 [ key ]: if k1 in rows2 : nrowm += len ( rows2 [ k1 ]) else : nrowm += 1 # allocate the return array # ret = np.full((nrowm, ), fill, dtype=descrm) _ret = np . recarray ( nrowm , dtype = descrm ) if mask : ret = np . ma . array ( _ret , mask = True ) else : ret = _ret # merge the data into the return array i = 0 for row1 in r1 : if row1 [ key ] in rows2 : for row2 in rows2 [ row1 [ key ]]: ret [ i ] = tuple ( row1 [ f1 ]) + tuple ( row2 [ f2 ]) i += 1 else : for j in range ( ncol1 ): ret [ i ][ j ] = row1 [ j ] i += 1 return ret","title":"join_by_left()"},{"location":"api/omicron/#omicron.extensions.np.numpy_append_fields","text":"\u7ed9\u73b0\u6709\u7684\u6570\u7ec4 base \u589e\u52a0\u65b0\u7684\u5b57\u6bb5 \u5b9e\u73b0\u4e86 numpy.lib.recfunctions.rec_append_fields \u7684\u529f\u80fd\u3002\u63d0\u4f9b\u8fd9\u4e2a\u529f\u80fd\uff0c\u662f\u56e0\u4e3a rec_append_fields \u4e0d\u80fd\u5904\u7406 data \u5143\u7d20\u7684\u7c7b\u578b\u4e3aObject\u7684\u60c5\u51b5\u3002 \u65b0\u589e\u7684\u6570\u636e\u5217\u5c06\u987a\u5e8f\u6392\u5217\u5728\u5176\u5b83\u5217\u7684\u53f3\u8fb9\u3002 Examples: >>> # \u65b0\u589e\u5355\u4e2a\u5b57\u6bb5 >>> import numpy >>> old = np . array ([ i for i in range ( 3 )], dtype = [( 'col1' , '>> new_list = [ 2 * i for i in range ( 3 )] >>> res = numpy_append_fields ( old , 'new_col' , new_list , [( 'new_col' , '>> print ( res ) ... [( 0. , 0. ) ( 1. , 2. ) ( 2. , 4. )] >>> # \u65b0\u589e\u591a\u4e2a\u5b57\u6bb5 >>> data = [ res [ 'col1' ] . tolist (), res [ 'new_col' ] . tolist ()] >>> print ( numpy_append_fields ( old , ( 'col3' , 'col4' ), data , [( 'col3' , ' np . 
ndarray : \"\"\"\u7ed9\u73b0\u6709\u7684\u6570\u7ec4`base`\u589e\u52a0\u65b0\u7684\u5b57\u6bb5 \u5b9e\u73b0\u4e86`numpy.lib.recfunctions.rec_append_fields`\u7684\u529f\u80fd\u3002\u63d0\u4f9b\u8fd9\u4e2a\u529f\u80fd\uff0c\u662f\u56e0\u4e3a`rec_append_fields`\u4e0d\u80fd\u5904\u7406`data`\u5143\u7d20\u7684\u7c7b\u578b\u4e3aObject\u7684\u60c5\u51b5\u3002 \u65b0\u589e\u7684\u6570\u636e\u5217\u5c06\u987a\u5e8f\u6392\u5217\u5728\u5176\u5b83\u5217\u7684\u53f3\u8fb9\u3002 Example: >>> # \u65b0\u589e\u5355\u4e2a\u5b57\u6bb5 >>> import numpy >>> old = np.array([i for i in range(3)], dtype=[('col1', '>> new_list = [2 * i for i in range(3)] >>> res = numpy_append_fields(old, 'new_col', new_list, [('new_col', '>> print(res) ... # doctest: +NORMALIZE_WHITESPACE [(0., 0.) (1., 2.) (2., 4.)] >>> # \u65b0\u589e\u591a\u4e2a\u5b57\u6bb5 >>> data = [res['col1'].tolist(), res['new_col'].tolist()] >>> print(numpy_append_fields(old, ('col3', 'col4'), data, [('col3', ' np . ndarray : \"\"\"\u4ece`ts`\u4e2d\u53bb\u9664NaN Args: ts (np.array): [description] Returns: np.array: [description] \"\"\" return ts [ ~ np . isnan ( ts . astype ( float ))]","title":"remove_nan()"},{"location":"api/omicron/#omicron.extensions.np.replace_zero","text":"\u5c06ts\u4e2d\u76840\u66ff\u6362\u4e3a\u524d\u503c, \u5904\u7406volume\u6570\u636e\u65f6\u5e38\u7528\u7528\u5230 \u5982\u679c\u63d0\u4f9b\u4e86replacement, \u5219\u66ff\u6362\u4e3areplacement Source code in omicron/extensions/np.py def replace_zero ( ts : np . ndarray , replacement = None ) -> np . ndarray : \"\"\"\u5c06ts\u4e2d\u76840\u66ff\u6362\u4e3a\u524d\u503c, \u5904\u7406volume\u6570\u636e\u65f6\u5e38\u7528\u7528\u5230 \u5982\u679c\u63d0\u4f9b\u4e86replacement, \u5219\u66ff\u6362\u4e3areplacement \"\"\" if replacement is not None : return np . where ( ts == 0 , replacement , ts ) if np . all ( ts == 0 ): raise ValueError ( \"all of ts are 0\" ) if ts [ 0 ] == 0 : idx = np . argwhere ( ts != 0 )[ 0 ] ts [ 0 ] = ts [ idx ] mask = ts == 0 idx = np . where ( ~ mask , np . arange ( mask . size ), 0 ) np . maximum . accumulate ( idx , out = idx ) return ts [ idx ]","title":"replace_zero()"},{"location":"api/omicron/#omicron.extensions.np.rolling","text":"\u5bf9\u5e8f\u5217 x \u8fdb\u884c\u7a97\u53e3\u6ed1\u52a8\u8ba1\u7b97\u3002 \u5982\u679c func \u8981\u5b9e\u73b0\u7684\u529f\u80fd\u662fargmax, argmin, max, mean, median, min, rank, std, sum, var\u7b49\uff0cmove_argmax\uff0c\u8bf7\u4f7f\u7528bottleneck\u4e2d\u7684move_argmin, move_max, move_mean, move_median, move_min move_rank, move_std, move_sum, move_var\u3002\u8fd9\u4e9b\u51fd\u6570\u7684\u6027\u80fd\u66f4\u597d\u3002 Parameters: Name Type Description Default x [type] [description] required win [type] [description] required func [type] [description] required Returns: Type Description [type] [description] Source code in omicron/extensions/np.py def rolling ( x , win , func ): \"\"\"\u5bf9\u5e8f\u5217`x`\u8fdb\u884c\u7a97\u53e3\u6ed1\u52a8\u8ba1\u7b97\u3002 \u5982\u679c`func`\u8981\u5b9e\u73b0\u7684\u529f\u80fd\u662fargmax, argmin, max, mean, median, min, rank, std, sum, var\u7b49\uff0cmove_argmax\uff0c\u8bf7\u4f7f\u7528bottleneck\u4e2d\u7684move_argmin, move_max, move_mean, move_median, move_min move_rank, move_std, move_sum, move_var\u3002\u8fd9\u4e9b\u51fd\u6570\u7684\u6027\u80fd\u66f4\u597d\u3002 Args: x ([type]): [description] win ([type]): [description] func ([type]): [description] Returns: [type]: [description] \"\"\" results = [] for subarray in sliding_window_view ( x , window_shape = win ): results . 
append ( func ( subarray )) return np . array ( results )","title":"rolling()"},{"location":"api/omicron/#omicron.extensions.np.shift","text":"\u5728numpy\u6570\u7ec4arr\u4e2d\uff0c\u627e\u5230start(\u6216\u8005\u6700\u63a5\u8fd1\u7684\u4e00\u4e2a\uff09\uff0c\u53d6offset\u5bf9\u5e94\u7684\u5143\u7d20\u3002 \u8981\u6c42 arr \u5df2\u6392\u5e8f\u3002 offset \u4e3a\u6b63\uff0c\u8868\u660e\u5411\u540e\u79fb\u4f4d\uff1b offset \u4e3a\u8d1f\uff0c\u8868\u660e\u5411\u524d\u79fb\u4f4d Examples: >>> arr = [ 20050104 , 20050105 , 20050106 , 20050107 , 20050110 , 20050111 ] >>> shift ( arr , 20050104 , 1 ) 20050105 >>> shift ( arr , 20050105 , - 1 ) 20050104 >>> # \u8d77\u59cb\u70b9\u5df2\u53f3\u8d8a\u754c\uff0c\u4e14\u5411\u53f3shift\uff0c\u8fd4\u56de\u8d77\u59cb\u70b9 >>> shift ( arr , 20050120 , 1 ) 20050120 Parameters: Name Type Description Default arr \u5df2\u6392\u5e8f\u7684\u6570\u7ec4 required start numpy\u53ef\u63a5\u53d7\u7684\u6570\u636e\u7c7b\u578b required offset int [description] required Returns: Type Description \u79fb\u4f4d\u540e\u5f97\u5230\u7684\u5143\u7d20\u503c Source code in omicron/extensions/np.py def shift ( arr , start , offset ): \"\"\"\u5728numpy\u6570\u7ec4arr\u4e2d\uff0c\u627e\u5230start(\u6216\u8005\u6700\u63a5\u8fd1\u7684\u4e00\u4e2a\uff09\uff0c\u53d6offset\u5bf9\u5e94\u7684\u5143\u7d20\u3002 \u8981\u6c42`arr`\u5df2\u6392\u5e8f\u3002`offset`\u4e3a\u6b63\uff0c\u8868\u660e\u5411\u540e\u79fb\u4f4d\uff1b`offset`\u4e3a\u8d1f\uff0c\u8868\u660e\u5411\u524d\u79fb\u4f4d Examples: >>> arr = [20050104, 20050105, 20050106, 20050107, 20050110, 20050111] >>> shift(arr, 20050104, 1) 20050105 >>> shift(arr, 20050105, -1) 20050104 >>> # \u8d77\u59cb\u70b9\u5df2\u53f3\u8d8a\u754c\uff0c\u4e14\u5411\u53f3shift\uff0c\u8fd4\u56de\u8d77\u59cb\u70b9 >>> shift(arr, 20050120, 1) 20050120 Args: arr : \u5df2\u6392\u5e8f\u7684\u6570\u7ec4 start : numpy\u53ef\u63a5\u53d7\u7684\u6570\u636e\u7c7b\u578b offset (int): [description] Returns: \u79fb\u4f4d\u540e\u5f97\u5230\u7684\u5143\u7d20\u503c \"\"\" pos = np . searchsorted ( arr , start , side = \"right\" ) if pos + offset - 1 >= len ( arr ): return start else : return arr [ pos + offset - 1 ]","title":"shift()"},{"location":"api/omicron/#omicron.extensions.np.smallest_n_argpos","text":"get smallest n (min->max) elements and return argpos which its value ordered in ascent Examples: >>> smallest_n_argpos ([ np . nan , 4 , 3 , 9 , 8 , 5 , 2 , 1 , 0 , 6 , 7 ], 2 ) array ([ 8 , 7 ]) Parameters: Name Type Description Default ts np.array \u8f93\u5165\u7684\u6570\u7ec4 required n int \u53d6\u6700\u5c0f\u7684n\u4e2a\u5143\u7d20 required Returns: Type Description np.array [description] Source code in omicron/extensions/np.py def smallest_n_argpos ( ts : np . array , n : int ) -> np . array : \"\"\"get smallest n (min->max) elements and return argpos which its value ordered in ascent Example: >>> smallest_n_argpos([np.nan, 4, 3, 9, 8, 5, 2, 1, 0, 6, 7], 2) array([8, 7]) Args: ts (np.array): \u8f93\u5165\u7684\u6570\u7ec4 n (int): \u53d6\u6700\u5c0f\u7684n\u4e2a\u5143\u7d20 Returns: np.array: [description] \"\"\" return np . 
argsort ( ts )[: n ]","title":"smallest_n_argpos()"},{"location":"api/omicron/#omicron.extensions.np.to_pydatetime","text":"\u5c06numpy.datetime64\u5bf9\u8c61\u8f6c\u6362\u6210\u4e3apython\u7684datetime\u5bf9\u8c61 numpy.ndarray.item()\u65b9\u6cd5\u53ef\u7528\u4ee5\u5c06\u4efb\u4f55numpy\u5bf9\u8c61\u8f6c\u6362\u6210python\u5bf9\u8c61\uff0c\u63a8\u8350\u5728\u4efb\u4f55\u9002\u7528\u7684\u5730\u65b9\u4f7f\u7528.item()\u65b9\u6cd5\uff0c\u800c\u4e0d\u662f\u672c\u65b9\u6cd5\u3002\u793a\u4f8b: 1 2 3 4 arr = np.array(['2022-09-08', '2022-09-09'], dtype='datetime64[s]') arr.item(0) # output is datetime.datetime(2022, 9, 8, 0, 0) arr[1].item() # output is datetime.datetime(2022, 9, 9, 0, 0) Parameters: Name Type Description Default tm the input numpy datetime object required Returns: Type Description datetime.datetime python datetime object .. deprecated:: 2.0.0 use tm.item() instead Source code in omicron/extensions/np.py @deprecated ( \"2.0.0\" , details = \"use `tm.item()` instead\" ) def to_pydatetime ( tm : np . datetime64 ) -> datetime . datetime : \"\"\"\u5c06numpy.datetime64\u5bf9\u8c61\u8f6c\u6362\u6210\u4e3apython\u7684datetime\u5bf9\u8c61 numpy.ndarray.item()\u65b9\u6cd5\u53ef\u7528\u4ee5\u5c06\u4efb\u4f55numpy\u5bf9\u8c61\u8f6c\u6362\u6210python\u5bf9\u8c61\uff0c\u63a8\u8350\u5728\u4efb\u4f55\u9002\u7528\u7684\u5730\u65b9\u4f7f\u7528.item()\u65b9\u6cd5\uff0c\u800c\u4e0d\u662f\u672c\u65b9\u6cd5\u3002\u793a\u4f8b: ``` arr = np.array(['2022-09-08', '2022-09-09'], dtype='datetime64[s]') arr.item(0) # output is datetime.datetime(2022, 9, 8, 0, 0) arr[1].item() # output is datetime.datetime(2022, 9, 9, 0, 0) ``` Args: tm : the input numpy datetime object Returns: python datetime object \"\"\" unix_epoch = np . datetime64 ( 0 , \"s\" ) one_second = np . timedelta64 ( 1 , \"s\" ) seconds_since_epoch = ( tm - unix_epoch ) / one_second return datetime . datetime . utcfromtimestamp ( seconds_since_epoch )","title":"to_pydatetime()"},{"location":"api/omicron/#omicron.extensions.np.top_n_argpos","text":"get top n (max->min) elements and return argpos which its value ordered in descent Examples: >>> top_n_argpos ([ np . nan , 4 , 3 , 9 , 8 , 5 , 2 , 1 , 0 , 6 , 7 ], 2 ) array ([ 3 , 4 ]) Parameters: Name Type Description Default ts np.array [description] required n int [description] required Returns: Type Description np.array [description] Source code in omicron/extensions/np.py def top_n_argpos ( ts : np . array , n : int ) -> np . array : \"\"\"get top n (max->min) elements and return argpos which its value ordered in descent Example: >>> top_n_argpos([np.nan, 4, 3, 9, 8, 5, 2, 1, 0, 6, 7], 2) array([3, 4]) Args: ts (np.array): [description] n (int): [description] Returns: np.array: [description] \"\"\" ts_ = np . copy ( ts ) ts_ [ np . isnan ( ts_ )] = - np . inf return np . 
argsort ( ts_ )[ - n :][:: - 1 ]","title":"top_n_argpos()"},{"location":"api/omicron/#notify-package","text":"","title":"Notify package"},{"location":"api/omicron/#omicron.notify.dingtalk","text":"","title":"dingtalk"},{"location":"api/omicron/#omicron.notify.dingtalk.DingTalkMessage","text":"\u9489\u9489\u7684\u673a\u5668\u4eba\u6d88\u606f\u63a8\u9001\u7c7b\uff0c\u5c01\u88c5\u4e86\u5e38\u7528\u7684\u6d88\u606f\u7c7b\u578b\u4ee5\u53ca\u52a0\u5bc6\u7b97\u6cd5 \u9700\u8981\u5728\u914d\u7f6e\u6587\u4ef6\u4e2d\u914d\u7f6e\u9489\u9489\u7684\u673a\u5668\u4eba\u7684access_token \u5982\u679c\u914d\u7f6e\u4e86\u52a0\u7b7e\uff0c\u9700\u8981\u5728\u914d\u7f6e\u6587\u4ef6\u4e2d\u914d\u7f6e\u9489\u9489\u7684\u673a\u5668\u4eba\u7684secret \u5982\u679c\u914d\u7f6e\u4e86\u81ea\u5b9a\u4e49\u5173\u952e\u8bcd\uff0c\u9700\u8981\u5728\u914d\u7f6e\u6587\u4ef6\u4e2d\u914d\u7f6e\u9489\u9489\u7684\u673a\u5668\u4eba\u7684keyword\uff0c\u591a\u4e2a\u5173\u952e\u8bcd\u7528\u82f1\u6587\u9017\u53f7\u5206\u9694 \u5168\u90e8\u7684\u914d\u7f6e\u6587\u4ef6\u793a\u4f8b\u5982\u4e0b, \u5176\u4e2dsecret\u548ckeyword\u53ef\u4ee5\u4e0d\u914d\u7f6e, access_token\u5fc5\u987b\u914d\u7f6e notify: dingtalk_access_token: xxxx dingtalk_secret: xxxx Source code in omicron/notify/dingtalk.py class DingTalkMessage : \"\"\" \u9489\u9489\u7684\u673a\u5668\u4eba\u6d88\u606f\u63a8\u9001\u7c7b\uff0c\u5c01\u88c5\u4e86\u5e38\u7528\u7684\u6d88\u606f\u7c7b\u578b\u4ee5\u53ca\u52a0\u5bc6\u7b97\u6cd5 \u9700\u8981\u5728\u914d\u7f6e\u6587\u4ef6\u4e2d\u914d\u7f6e\u9489\u9489\u7684\u673a\u5668\u4eba\u7684access_token \u5982\u679c\u914d\u7f6e\u4e86\u52a0\u7b7e\uff0c\u9700\u8981\u5728\u914d\u7f6e\u6587\u4ef6\u4e2d\u914d\u7f6e\u9489\u9489\u7684\u673a\u5668\u4eba\u7684secret \u5982\u679c\u914d\u7f6e\u4e86\u81ea\u5b9a\u4e49\u5173\u952e\u8bcd\uff0c\u9700\u8981\u5728\u914d\u7f6e\u6587\u4ef6\u4e2d\u914d\u7f6e\u9489\u9489\u7684\u673a\u5668\u4eba\u7684keyword\uff0c\u591a\u4e2a\u5173\u952e\u8bcd\u7528\u82f1\u6587\u9017\u53f7\u5206\u9694 \u5168\u90e8\u7684\u914d\u7f6e\u6587\u4ef6\u793a\u4f8b\u5982\u4e0b, \u5176\u4e2dsecret\u548ckeyword\u53ef\u4ee5\u4e0d\u914d\u7f6e, access_token\u5fc5\u987b\u914d\u7f6e notify: dingtalk_access_token: xxxx dingtalk_secret: xxxx \"\"\" url = \"https://oapi.dingtalk.com/robot/send\" @classmethod def _get_access_token ( cls ): \"\"\"\u83b7\u53d6\u9489\u9489\u673a\u5668\u4eba\u7684access_token\"\"\" if hasattr ( cfg . notify , \"dingtalk_access_token\" ): return cfg . notify . dingtalk_access_token else : logger . error ( \"Dingtalk not configured, please add the following items: \\n \" \"notify: \\n \" \" dingtalk_access_token: xxxx \\n \" \" dingtalk_secret: xxxx \\n \" ) raise ConfigError ( \"dingtalk_access_token not found\" ) @classmethod def _get_secret ( cls ): \"\"\"\u83b7\u53d6\u9489\u9489\u673a\u5668\u4eba\u7684secret\"\"\" if hasattr ( cfg . notify , \"dingtalk_secret\" ): return cfg . notify . dingtalk_secret else : return None @classmethod def _get_url ( cls ): \"\"\"\u83b7\u53d6\u9489\u9489\u673a\u5668\u4eba\u7684\u6d88\u606f\u63a8\u9001\u5730\u5740\uff0c\u5c06\u7b7e\u540d\u548c\u65f6\u95f4\u6233\u62fc\u63a5\u5728url\u540e\u9762\"\"\" access_token = cls . _get_access_token () url = f \" { cls . url } ?access_token= { access_token } \" secret = cls . _get_secret () if secret : timestamp , sign = cls . 
_get_sign ( secret ) url = f \" { url } ×tamp= { timestamp } &sign= { sign } \" return url @classmethod def _get_sign ( cls , secret : str ): \"\"\"\u83b7\u53d6\u7b7e\u540d\u53d1\u9001\u7ed9\u9489\u9489\u673a\u5668\u4eba\"\"\" timestamp = str ( round ( time . time () * 1000 )) secret_enc = secret . encode ( \"utf-8\" ) string_to_sign = \" {} \\n {} \" . format ( timestamp , secret ) string_to_sign_enc = string_to_sign . encode ( \"utf-8\" ) hmac_code = hmac . new ( secret_enc , string_to_sign_enc , digestmod = hashlib . sha256 ) . digest () sign = urllib . parse . quote_plus ( base64 . b64encode ( hmac_code )) return timestamp , sign @classmethod def _send ( cls , msg ): \"\"\"\u53d1\u9001\u6d88\u606f\u5230\u9489\u9489\u673a\u5668\u4eba\"\"\" url = cls . _get_url () response = httpx . post ( url , json = msg , timeout = 30 ) if response . status_code != 200 : logger . error ( f \"failed to send message, content: { msg } , response from Dingtalk: { response . content . decode () } \" ) return rsp = json . loads ( response . content ) if rsp . get ( \"errcode\" ) != 0 : logger . error ( f \"failed to send message, content: { msg } , response from Dingtalk: { rsp } \" ) return response . content . decode () @classmethod async def _send_async ( cls , msg ): \"\"\"\u53d1\u9001\u6d88\u606f\u5230\u9489\u9489\u673a\u5668\u4eba\"\"\" url = cls . _get_url () async with httpx . AsyncClient () as client : r = await client . post ( url , json = msg , timeout = 30 ) if r . status_code != 200 : logger . error ( f \"failed to send message, content: { msg } , response from Dingtalk: { r . content . decode () } \" ) return rsp = json . loads ( r . content ) if rsp . get ( \"errcode\" ) != 0 : logger . error ( f \"failed to send message, content: { msg } , response from Dingtalk: { rsp } \" ) return r . content . decode () @classmethod @deprecated ( \"2.0.0\" , details = \"use function `ding` instead\" ) def text ( cls , content ): msg = { \"text\" : { \"content\" : content }, \"msgtype\" : \"text\" } return cls . _send ( msg )","title":"DingTalkMessage"},{"location":"api/omicron/#omicron.notify.dingtalk.DingTalkMessage.text","text":".. deprecated:: 2.0.0 use function ding instead Source code in omicron/notify/dingtalk.py @classmethod @deprecated ( \"2.0.0\" , details = \"use function `ding` instead\" ) def text ( cls , content ): msg = { \"text\" : { \"content\" : content }, \"msgtype\" : \"text\" } return cls . 
_send ( msg )","title":"text()"},{"location":"api/omicron/#omicron.notify.dingtalk.ding","text":"\u53d1\u9001\u6d88\u606f\u5230\u9489\u9489\u673a\u5668\u4eba \u652f\u6301\u53d1\u9001\u7eaf\u6587\u672c\u6d88\u606f\u548cmarkdown\u683c\u5f0f\u7684\u6587\u672c\u6d88\u606f\u3002\u5982\u679c\u8981\u53d1\u9001markdown\u683c\u5f0f\u7684\u6d88\u606f\uff0c\u8bf7\u901a\u8fc7\u5b57\u5178\u4f20\u5165\uff0c\u5fc5\u987b\u5305\u542b\u5305\u542b\"title\"\u548c\"text\"\u4e24\u4e2a\u5b57\u6bb5\u3002\u66f4\u8be6\u7ec6\u4fe1\u606f\uff0c\u8bf7\u89c1 \u9489\u9489\u5f00\u653e\u5e73\u53f0\u6587\u6863 Important \u5fc5\u987b\u5728\u5f02\u6b65\u7ebf\u7a0b(\u5373\u8fd0\u884casyncio loop\u7684\u7ebf\u7a0b\uff09\u4e2d\u8c03\u7528\u6b64\u65b9\u6cd5\uff0c\u5426\u5219\u4f1a\u629b\u51fa\u5f02\u5e38\u3002 \u6b64\u65b9\u6cd5\u8fd4\u56de\u4e00\u4e2aAwaitable\uff0c\u60a8\u53ef\u4ee5\u7b49\u5f85\u5b83\u5b8c\u6210\uff0c\u4e5f\u53ef\u4ee5\u5ffd\u7565\u8fd4\u56de\u503c\uff0c\u6b64\u65f6\u5b83\u5c06\u4f5c\u4e3a\u4e00\u4e2a\u540e\u53f0\u4efb\u52a1\u6267\u884c\uff0c\u4f46\u5b8c\u6210\u7684\u65f6\u95f4\u4e0d\u786e\u5b9a\u3002 Parameters: Name Type Description Default msg Union[str, dict] \u5f85\u53d1\u9001\u6d88\u606f\u3002 required Returns: Type Description Awaitable \u53d1\u9001\u6d88\u606f\u7684\u540e\u53f0\u4efb\u52a1\u3002\u60a8\u53ef\u4ee5\u4f7f\u7528\u6b64\u8fd4\u56de\u53e5\u67c4\u6765\u53d6\u6d88\u4efb\u52a1\u3002 Source code in omicron/notify/dingtalk.py def ding ( msg : Union [ str , dict ]) -> Awaitable : \"\"\"\u53d1\u9001\u6d88\u606f\u5230\u9489\u9489\u673a\u5668\u4eba \u652f\u6301\u53d1\u9001\u7eaf\u6587\u672c\u6d88\u606f\u548cmarkdown\u683c\u5f0f\u7684\u6587\u672c\u6d88\u606f\u3002\u5982\u679c\u8981\u53d1\u9001markdown\u683c\u5f0f\u7684\u6d88\u606f\uff0c\u8bf7\u901a\u8fc7\u5b57\u5178\u4f20\u5165\uff0c\u5fc5\u987b\u5305\u542b\u5305\u542b\"title\"\u548c\"text\"\u4e24\u4e2a\u5b57\u6bb5\u3002\u66f4\u8be6\u7ec6\u4fe1\u606f\uff0c\u8bf7\u89c1[\u9489\u9489\u5f00\u653e\u5e73\u53f0\u6587\u6863](https://open.dingtalk.com/document/orgapp-server/message-type) ???+ Important \u5fc5\u987b\u5728\u5f02\u6b65\u7ebf\u7a0b(\u5373\u8fd0\u884casyncio loop\u7684\u7ebf\u7a0b\uff09\u4e2d\u8c03\u7528\u6b64\u65b9\u6cd5\uff0c\u5426\u5219\u4f1a\u629b\u51fa\u5f02\u5e38\u3002 \u6b64\u65b9\u6cd5\u8fd4\u56de\u4e00\u4e2aAwaitable\uff0c\u60a8\u53ef\u4ee5\u7b49\u5f85\u5b83\u5b8c\u6210\uff0c\u4e5f\u53ef\u4ee5\u5ffd\u7565\u8fd4\u56de\u503c\uff0c\u6b64\u65f6\u5b83\u5c06\u4f5c\u4e3a\u4e00\u4e2a\u540e\u53f0\u4efb\u52a1\u6267\u884c\uff0c\u4f46\u5b8c\u6210\u7684\u65f6\u95f4\u4e0d\u786e\u5b9a\u3002 Args: msg: \u5f85\u53d1\u9001\u6d88\u606f\u3002 Returns: \u53d1\u9001\u6d88\u606f\u7684\u540e\u53f0\u4efb\u52a1\u3002\u60a8\u53ef\u4ee5\u4f7f\u7528\u6b64\u8fd4\u56de\u53e5\u67c4\u6765\u53d6\u6d88\u4efb\u52a1\u3002 \"\"\" if isinstance ( msg , str ): msg_ = { \"text\" : { \"content\" : msg }, \"msgtype\" : \"text\" } elif isinstance ( msg , dict ): msg_ = { \"msgtype\" : \"markdown\" , \"markdown\" : { \"title\" : msg [ \"title\" ], \"text\" : msg [ \"text\" ]}, } else : raise TypeError task = asyncio . create_task ( DingTalkMessage . 
_send_async ( msg_ )) return task","title":"ding()"},{"location":"api/omicron/#omicron.notify.mail","text":"","title":"mail"},{"location":"api/omicron/#omicron.notify.mail.compose","text":"\u7f16\u5199MIME\u90ae\u4ef6\u3002 Parameters: Name Type Description Default subject str \u90ae\u4ef6\u4e3b\u9898 required plain_txt str \u7eaf\u6587\u672c\u683c\u5f0f\u7684\u90ae\u4ef6\u5185\u5bb9 None html str html\u683c\u5f0f\u7684\u90ae\u4ef6\u5185\u5bb9. Defaults to None. None attachment str \u9644\u4ef6\u6587\u4ef6\u540d None Returns: Type Description EmailMessage MIME mail Source code in omicron/notify/mail.py def compose ( subject : str , plain_txt : str = None , html : str = None , attachment : str = None ) -> EmailMessage : \"\"\"\u7f16\u5199MIME\u90ae\u4ef6\u3002 Args: subject (str): \u90ae\u4ef6\u4e3b\u9898 plain_txt (str): \u7eaf\u6587\u672c\u683c\u5f0f\u7684\u90ae\u4ef6\u5185\u5bb9 html (str, optional): html\u683c\u5f0f\u7684\u90ae\u4ef6\u5185\u5bb9. Defaults to None. attachment (str, optional): \u9644\u4ef6\u6587\u4ef6\u540d Returns: MIME mail \"\"\" msg = EmailMessage () msg [ \"Subject\" ] = subject if html : msg . preamble = plain_txt or \"\" msg . set_content ( html , subtype = \"html\" ) else : assert plain_txt , \"Either plain_txt or html is required.\" msg . set_content ( plain_txt ) if attachment : ctype , encoding = mimetypes . guess_type ( attachment ) if ctype is None or encoding is not None : ctype = \"application/octet-stream\" maintype , subtype = ctype . split ( \"/\" , 1 ) with open ( attachment , \"rb\" ) as f : msg . add_attachment ( f . read (), maintype = maintype , subtype = subtype , filename = attachment ) return msg","title":"compose()"},{"location":"api/omicron/#omicron.notify.mail.mail_notify","text":"\u53d1\u9001\u90ae\u4ef6\u901a\u77e5\u3002 \u53d1\u9001\u8005\u3001\u63a5\u6536\u8005\u53ca\u90ae\u4ef6\u670d\u52a1\u5668\u7b49\u914d\u7f6e\u8bf7\u901a\u8fc7cfg4py\u914d\u7f6e\uff1a 1 2 3 4 5 notify: mail_from: aaron_yang@jieyu.ai mail_to: - code@jieyu.ai mail_server: smtp.ym.163.com \u9a8c\u8bc1\u5bc6\u7801\u8bf7\u901a\u8fc7\u73af\u5883\u53d8\u91cf MAIL_PASSWORD \u6765\u914d\u7f6e\u3002 subject/body\u4e0emsg\u5fc5\u987b\u63d0\u4f9b\u5176\u4e00\u3002 Important \u5fc5\u987b\u5728\u5f02\u6b65\u7ebf\u7a0b(\u5373\u8fd0\u884casyncio loop\u7684\u7ebf\u7a0b\uff09\u4e2d\u8c03\u7528\u6b64\u65b9\u6cd5\uff0c\u5426\u5219\u4f1a\u629b\u51fa\u5f02\u5e38\u3002 \u6b64\u65b9\u6cd5\u8fd4\u56de\u4e00\u4e2aAwaitable\uff0c\u60a8\u53ef\u4ee5\u7b49\u5f85\u5b83\u5b8c\u6210\uff0c\u4e5f\u53ef\u4ee5\u5ffd\u7565\u8fd4\u56de\u503c\uff0c\u6b64\u65f6\u5b83\u5c06\u4f5c\u4e3a\u4e00\u4e2a\u540e\u53f0\u4efb\u52a1\u6267\u884c\uff0c\u4f46\u5b8c\u6210\u7684\u65f6\u95f4\u4e0d\u786e\u5b9a\u3002 Parameters: Name Type Description Default msg EmailMessage [description]. Defaults to None. None subject str [description]. Defaults to None. None body str [description]. Defaults to None. None html bool body\u662f\u5426\u6309html\u683c\u5f0f\u5904\u7406\uff1f Defaults to False. 
False receivers List[str], Optional \u63a5\u6536\u8005\u4fe1\u606f\u3002\u5982\u679c\u4e0d\u63d0\u4f9b\uff0c\u5c06\u4f7f\u7528\u9884\u5148\u914d\u7f6e\u7684\u63a5\u6536\u8005\u4fe1\u606f\u3002 None Returns: Type Description Awaitable \u53d1\u9001\u6d88\u606f\u7684\u540e\u53f0\u4efb\u52a1\u3002\u60a8\u53ef\u4ee5\u4f7f\u7528\u6b64\u8fd4\u56de\u53e5\u67c4\u6765\u53d6\u6d88\u4efb\u52a1\u3002 Source code in omicron/notify/mail.py def mail_notify ( subject : str = None , body : str = None , msg : EmailMessage = None , html = False , receivers = None , ) -> Awaitable : \"\"\"\u53d1\u9001\u90ae\u4ef6\u901a\u77e5\u3002 \u53d1\u9001\u8005\u3001\u63a5\u6536\u8005\u53ca\u90ae\u4ef6\u670d\u52a1\u5668\u7b49\u914d\u7f6e\u8bf7\u901a\u8fc7cfg4py\u914d\u7f6e\uff1a ``` notify: mail_from: aaron_yang@jieyu.ai mail_to: - code@jieyu.ai mail_server: smtp.ym.163.com ``` \u9a8c\u8bc1\u5bc6\u7801\u8bf7\u901a\u8fc7\u73af\u5883\u53d8\u91cf`MAIL_PASSWORD`\u6765\u914d\u7f6e\u3002 subject/body\u4e0emsg\u5fc5\u987b\u63d0\u4f9b\u5176\u4e00\u3002 ???+ Important \u5fc5\u987b\u5728\u5f02\u6b65\u7ebf\u7a0b(\u5373\u8fd0\u884casyncio loop\u7684\u7ebf\u7a0b\uff09\u4e2d\u8c03\u7528\u6b64\u65b9\u6cd5\uff0c\u5426\u5219\u4f1a\u629b\u51fa\u5f02\u5e38\u3002 \u6b64\u65b9\u6cd5\u8fd4\u56de\u4e00\u4e2aAwaitable\uff0c\u60a8\u53ef\u4ee5\u7b49\u5f85\u5b83\u5b8c\u6210\uff0c\u4e5f\u53ef\u4ee5\u5ffd\u7565\u8fd4\u56de\u503c\uff0c\u6b64\u65f6\u5b83\u5c06\u4f5c\u4e3a\u4e00\u4e2a\u540e\u53f0\u4efb\u52a1\u6267\u884c\uff0c\u4f46\u5b8c\u6210\u7684\u65f6\u95f4\u4e0d\u786e\u5b9a\u3002 Args: msg (EmailMessage, optional): [description]. Defaults to None. subject (str, optional): [description]. Defaults to None. body (str, optional): [description]. Defaults to None. html (bool, optional): body\u662f\u5426\u6309html\u683c\u5f0f\u5904\u7406\uff1f Defaults to False. receivers (List[str], Optional): \u63a5\u6536\u8005\u4fe1\u606f\u3002\u5982\u679c\u4e0d\u63d0\u4f9b\uff0c\u5c06\u4f7f\u7528\u9884\u5148\u914d\u7f6e\u7684\u63a5\u6536\u8005\u4fe1\u606f\u3002 Returns: \u53d1\u9001\u6d88\u606f\u7684\u540e\u53f0\u4efb\u52a1\u3002\u60a8\u53ef\u4ee5\u4f7f\u7528\u6b64\u8fd4\u56de\u53e5\u67c4\u6765\u53d6\u6d88\u4efb\u52a1\u3002 \"\"\" if all ([ msg is not None , subject or body ]): raise TypeError ( \"msg\u53c2\u6570\u4e0esubject/body\u53ea\u80fd\u63d0\u4f9b\u5176\u4e2d\u4e4b\u4e00\" ) elif all ([ msg is None , subject is None , body is None ]): raise TypeError ( \"\u5fc5\u987b\u63d0\u4f9bmsg\u53c2\u6570\u6216\u8005subjecdt/body\u53c2\u6570\" ) if msg is None : if html : msg = compose ( subject , html = body ) else : msg = compose ( subject , plain_txt = body ) cfg = cfg4py . get_instance () if not receivers : receivers = cfg . notify . mail_to password = os . environ . get ( \"MAIL_PASSWORD\" ) return send_mail ( cfg . notify . mail_from , receivers , password , msg , host = cfg . notify . 
mail_server )","title":"mail_notify()"},{"location":"api/omicron/#omicron.notify.mail.send_mail","text":"\u53d1\u9001\u90ae\u4ef6\u901a\u77e5\u3002 \u5982\u679c\u53ea\u53d1\u9001\u7b80\u5355\u7684\u6587\u672c\u90ae\u4ef6\uff0c\u8bf7\u4f7f\u7528 send_mail(sender, receivers, subject=subject, plain=plain)\u3002\u5982\u679c\u8981\u53d1\u9001\u8f83\u590d\u6742\u7684\u5e26html\u548c\u9644\u4ef6\u7684\u90ae\u4ef6\uff0c\u8bf7\u5148\u8c03\u7528compose()\u751f\u6210\u4e00\u4e2aEmailMessage,\u7136\u540e\u518d\u8c03\u7528send_mail(sender, receivers, msg)\u6765\u53d1\u9001\u90ae\u4ef6\u3002 Important \u5fc5\u987b\u5728\u5f02\u6b65\u7ebf\u7a0b(\u5373\u8fd0\u884casyncio loop\u7684\u7ebf\u7a0b\uff09\u4e2d\u8c03\u7528\u6b64\u65b9\u6cd5\uff0c\u5426\u5219\u4f1a\u629b\u51fa\u5f02\u5e38\u3002 \u6b64\u65b9\u6cd5\u8fd4\u56de\u4e00\u4e2aAwaitable\uff0c\u60a8\u53ef\u4ee5\u7b49\u5f85\u5b83\u5b8c\u6210\uff0c\u4e5f\u53ef\u4ee5\u5ffd\u7565\u8fd4\u56de\u503c\uff0c\u6b64\u65f6\u5b83\u5c06\u4f5c\u4e3a\u4e00\u4e2a\u540e\u53f0\u4efb\u52a1\u6267\u884c\uff0c\u4f46\u5b8c\u6210\u7684\u65f6\u95f4\u4e0d\u786e\u5b9a\u3002 Parameters: Name Type Description Default sender str [description] required receivers List[str] [description] required msg EmailMessage [description]. Defaults to None. None host str [description]. Defaults to None. None port int [description]. Defaults to 25. 25 cc List[str] [description]. Defaults to None. None bcc List[str] [description]. Defaults to None. None subject str [description]. Defaults to None. None plain str [description]. Defaults to None. required username str the username used to logon to mail server. if not provided, then sender is used. None Returns: Type Description Awaitable \u53d1\u9001\u6d88\u606f\u7684\u540e\u53f0\u4efb\u52a1\u3002\u60a8\u53ef\u4ee5\u4f7f\u7528\u6b64\u8fd4\u56de\u53e5\u67c4\u6765\u53d6\u6d88\u4efb\u52a1\u3002 Source code in omicron/notify/mail.py @retry ( aiosmtplib . errors . SMTPConnectError , tries = 3 , backoff = 2 , delay = 30 , logger = logger ) def send_mail ( sender : str , receivers : List [ str ], password : str , msg : EmailMessage = None , host : str = None , port : int = 25 , cc : List [ str ] = None , bcc : List [ str ] = None , subject : str = None , body : str = None , username : str = None , ) -> Awaitable : \"\"\"\u53d1\u9001\u90ae\u4ef6\u901a\u77e5\u3002 \u5982\u679c\u53ea\u53d1\u9001\u7b80\u5355\u7684\u6587\u672c\u90ae\u4ef6\uff0c\u8bf7\u4f7f\u7528 send_mail(sender, receivers, subject=subject, plain=plain)\u3002\u5982\u679c\u8981\u53d1\u9001\u8f83\u590d\u6742\u7684\u5e26html\u548c\u9644\u4ef6\u7684\u90ae\u4ef6\uff0c\u8bf7\u5148\u8c03\u7528compose()\u751f\u6210\u4e00\u4e2aEmailMessage,\u7136\u540e\u518d\u8c03\u7528send_mail(sender, receivers, msg)\u6765\u53d1\u9001\u90ae\u4ef6\u3002 ???+ Important \u5fc5\u987b\u5728\u5f02\u6b65\u7ebf\u7a0b(\u5373\u8fd0\u884casyncio loop\u7684\u7ebf\u7a0b\uff09\u4e2d\u8c03\u7528\u6b64\u65b9\u6cd5\uff0c\u5426\u5219\u4f1a\u629b\u51fa\u5f02\u5e38\u3002 \u6b64\u65b9\u6cd5\u8fd4\u56de\u4e00\u4e2aAwaitable\uff0c\u60a8\u53ef\u4ee5\u7b49\u5f85\u5b83\u5b8c\u6210\uff0c\u4e5f\u53ef\u4ee5\u5ffd\u7565\u8fd4\u56de\u503c\uff0c\u6b64\u65f6\u5b83\u5c06\u4f5c\u4e3a\u4e00\u4e2a\u540e\u53f0\u4efb\u52a1\u6267\u884c\uff0c\u4f46\u5b8c\u6210\u7684\u65f6\u95f4\u4e0d\u786e\u5b9a\u3002 Args: sender (str): [description] receivers (List[str]): [description] msg (EmailMessage, optional): [description]. Defaults to None. host (str, optional): [description]. Defaults to None. port (int, optional): [description]. Defaults to 25. 
cc (List[str], optional): [description]. Defaults to None. bcc (List[str], optional): [description]. Defaults to None. subject (str, optional): [description]. Defaults to None. plain (str, optional): [description]. Defaults to None. username (str, optional): the username used to logon to mail server. if not provided, then `sender` is used. Returns: \u53d1\u9001\u6d88\u606f\u7684\u540e\u53f0\u4efb\u52a1\u3002\u60a8\u53ef\u4ee5\u4f7f\u7528\u6b64\u8fd4\u56de\u53e5\u67c4\u6765\u53d6\u6d88\u4efb\u52a1\u3002 \"\"\" if all ([ msg is not None , subject is not None or body is not None ]): raise TypeError ( \"msg\u53c2\u6570\u4e0esubject/body\u53ea\u80fd\u63d0\u4f9b\u5176\u4e2d\u4e4b\u4e00\" ) elif all ([ msg is None , subject is None , body is None ]): raise TypeError ( \"\u5fc5\u987b\u63d0\u4f9bmsg\u53c2\u6570\u6216\u8005subjecdt/body\u53c2\u6570\" ) msg = msg or EmailMessage () if isinstance ( receivers , str ): receivers = [ receivers ] msg [ \"From\" ] = sender msg [ \"To\" ] = \", \" . join ( receivers ) if subject : msg [ \"subject\" ] = subject if body : msg . set_content ( body ) if cc : msg [ \"Cc\" ] = \", \" . join ( cc ) if bcc : msg [ \"Bcc\" ] = \", \" . join ( bcc ) username = username or sender if host is None : host = sender . split ( \"@\" )[ - 1 ] task = asyncio . create_task ( aiosmtplib . send ( msg , hostname = host , port = port , username = sender , password = password ) ) return task","title":"send_mail()"},{"location":"api/omicron/#backtesting-log-facility","text":"Info Since 2.0.0.a76 \u56de\u6d4b\u65f6\uff0c\u6253\u5370\u65f6\u95f4\u4e00\u822c\u8981\u6c42\u4e3a\u56de\u6d4b\u5f53\u65f6\u7684\u65f6\u95f4\uff0c\u800c\u975e\u7cfb\u7edf\u65f6\u95f4\u3002\u8fd9\u4e2a\u6a21\u5757\u63d0\u4f9b\u4e86\u6539\u5199\u65e5\u5fd7\u65f6\u95f4\u7684\u529f\u80fd\u3002 \u4f7f\u7528\u65b9\u6cd5\uff1a 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 from omicron.core.backtestlog import BacktestLogger logger = BacktestLogger . getLogger ( __name__ ) logger . setLevel ( logging . INFO ) handler = logging . StreamHandler () # \u901a\u8fc7bt_date\u57df\u6765\u8bbe\u7f6e\u65e5\u671f\uff0c\u800c\u4e0d\u662fasctime handler . setFormatter ( Formatter ( \" %(bt_date)s %(message)s \" )) logging . basicConfig ( level = logging . INFO , handlers = [ handler ]) # \u8c03\u7528\u65f6\u4e0e\u666e\u901a\u65e5\u5fd7\u4e00\u6837\uff0c\u4f46\u8981\u589e\u52a0\u4e00\u4e2adate\u53c2\u6570 logger . info ( \"this is info\" , date = datetime . date ( 2022 , 3 , 1 )) \u4e0a\u8ff0\u4ee3\u7801\u5c06\u8f93\u51fa\uff1a 1 2022-03-01 this is info \u4f7f\u7528\u672c\u65e5\u5fd7\u7684\u6838\u5fc3\u662f\u4e0a\u8ff0\u4ee3\u7801\u4e2d\u7684\u7b2c3\u884c\u548c\u7b2c9\u884c\uff0c\u6700\u540e\uff0c\u5728\u8f93\u51fa\u65e5\u5fd7\u65f6\u52a0\u4e0a date=... 
\uff0c\u5982\u7b2c15\u884c\u6240\u793a\u3002 \u6ce8\u610f\u5728\u7b2c9\u884c\uff0c\u901a\u5e38\u662f logging.getLogger(__nam__) \uff0c\u800c\u8fd9\u91cc\u662f BacktestLogger.getLogger(__name__) \u5982\u679c\u4e0a\u8ff0\u8c03\u7528\u4e2d\u6ca1\u6709\u4f20\u5165 date \uff0c\u5219\u5c06\u4f7f\u7528\u8c03\u7528\u65f6\u95f4\uff0c\u6b64\u65f6\u884c\u4e3a\u8ddf\u539f\u65e5\u5fd7\u7cfb\u7edf\u4e00\u81f4\u3002 Warning \u5f53\u8c03\u7528logger.exception\u65f6\uff0c\u4e0d\u80fd\u4f20\u5165date\u53c2\u6570\u3002","title":"Backtesting Log Facility"},{"location":"api/omicron/#omicron.core.backtestlog--\u914d\u7f6e\u6587\u4ef6\u793a\u4f8b","text":"\u5982\u679c\u8981\u901a\u8fc7\u914d\u7f6e\u6587\u4ef6\u6765\u914d\u7f6e\uff0c\u53ef\u4f7f\u7528\u4ee5\u4e0b\u793a\u4f8b\uff1a 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 formatters : backtest : format : '%(bt_date)s | %(message)s' handlers : backtest : class : logging.StreamHandler formatter : backtest omicron.base.strategy : level : INFO handlers : [ backtest ] propagate : false loggers : omicron.base.strategy : level : INFO handlers : [ backtest ] propagate : false","title":"\u914d\u7f6e\u6587\u4ef6\u793a\u4f8b"},{"location":"api/security/","text":"Query \u00b6 \u8bc1\u5238\u4fe1\u606f\u67e5\u8be2\u5bf9\u8c61 \u8bc1\u5238\u4fe1\u606f\u67e5\u8be2\u5bf9\u8c61\uff0c\u7531 Security.select() \u65b9\u6cd5\u751f\u6210\uff0c\u652f\u6301\u94fe\u5f0f\u67e5\u8be2\u3002\u901a\u8fc7 eval \u51fd\u6570\u7ed3\u675f\u94fe\u5f0f\u8c03\u7528\u5e76\u751f\u6210\u67e5\u8be2\u7ed3\u679c\u3002 Source code in omicron/models/security.py class Query : \"\"\"\u8bc1\u5238\u4fe1\u606f\u67e5\u8be2\u5bf9\u8c61 \u8bc1\u5238\u4fe1\u606f\u67e5\u8be2\u5bf9\u8c61\uff0c\u7531`Security.select()`\u65b9\u6cd5\u751f\u6210\uff0c\u652f\u6301\u94fe\u5f0f\u67e5\u8be2\u3002\u901a\u8fc7`eval`\u51fd\u6570\u7ed3\u675f\u94fe\u5f0f\u8c03\u7528\u5e76\u751f\u6210\u67e5\u8be2\u7ed3\u679c\u3002 \"\"\" def __init__ ( self , target_date : datetime . date = None ): if target_date is None : # \u805a\u5bbd\u4e0d\u4e00\u5b9a\u4f1a\u53ca\u65f6\u66f4\u65b0\u6570\u636e\uff0c\u56e0\u6b64db\u4e2d\u4e0d\u5b58\u653e\u5f53\u5929\u7684\u6570\u636e\uff0c\u5982\u679c\u4f20\u7a7a\uff0c\u67e5cache self . target_date = None else : # \u5982\u679c\u662f\u4ea4\u6613\u65e5\uff0c\u53d6\u5f53\u5929\uff0c\u5426\u5219\u53d6\u524d\u4e00\u5929 self . target_date = tf . day_shift ( target_date , 0 ) # \u540d\u5b57\uff0c\u663e\u793a\u540d\uff0c\u7c7b\u578b\u8fc7\u6ee4\u5668 self . _name_pattern = None # \u5b57\u6bcd\u540d\u5b57 self . _alias_pattern = None # \u663e\u793a\u540d self . _type_pattern = None # \u4e0d\u6307\u5b9a\u5219\u9ed8\u8ba4\u4e3a\u5168\u90e8\uff0c\u5982\u679c\u4f20\u5165\u7a7a\u503c\u5219\u53ea\u9009\u62e9\u80a1\u7968\u548c\u6307\u6570 # \u5f00\u5173\u9009\u9879 self . _exclude_kcb = False # \u79d1\u521b\u677f self . _exclude_cyb = False # \u521b\u4e1a\u677f self . _exclude_st = False # ST self . _include_exit = False # \u662f\u5426\u5305\u542b\u5df2\u9000\u5e02\u8bc1\u5238(\u9ed8\u8ba4\u4e0d\u5305\u62ec\u5f53\u5929\u9000\u5e02\u7684) # \u4e0b\u5217\u5f00\u5173\u4f18\u5148\u7ea7\u9ad8\u4e8e\u4e0a\u9762\u7684 self . _only_kcb = False self . _only_cyb = False self . _only_st = False def only_cyb ( self ) -> \"Query\" : \"\"\"\u8fd4\u56de\u7ed3\u679c\u4e2d\u53ea\u5305\u542b\u521b\u4e1a\u677f\u80a1\u7968\"\"\" self . _only_cyb = True # \u9ad8\u4f18\u5148\u7ea7 self . _exclude_cyb = False self . _only_kcb = False self . 
_only_st = False return self def only_st ( self ) -> \"Query\" : \"\"\"\u8fd4\u56de\u7ed3\u679c\u4e2d\u53ea\u5305\u542bST\u7c7b\u578b\u7684\u8bc1\u5238\"\"\" self . _only_st = True # \u9ad8\u4f18\u5148\u7ea7 self . _exclude_st = False self . _only_kcb = False self . _only_cyb = False return self def only_kcb ( self ) -> \"Query\" : \"\"\"\u8fd4\u56de\u7ed3\u679c\u4e2d\u53ea\u5305\u542b\u79d1\u521b\u677f\u80a1\u7968\"\"\" self . _only_kcb = True # \u9ad8\u4f18\u5148\u7ea7 self . _exclude_kcb = False self . _only_cyb = False self . _only_st = False return self def exclude_st ( self ) -> \"Query\" : \"\"\"\u4ece\u8fd4\u56de\u7ed3\u679c\u4e2d\u6392\u9664ST\u7c7b\u578b\u7684\u80a1\u7968\"\"\" self . _exclude_st = True self . _only_st = False return self def exclude_cyb ( self ) -> \"Query\" : \"\"\"\u4ece\u8fd4\u56de\u7ed3\u679c\u4e2d\u6392\u9664\u521b\u4e1a\u677f\u7c7b\u578b\u7684\u80a1\u7968\"\"\" self . _exclude_cyb = True self . _only_cyb = False return self def exclude_kcb ( self ) -> \"Query\" : \"\"\"\u4ece\u8fd4\u56de\u7ed3\u679c\u4e2d\u6392\u9664\u79d1\u521b\u677f\u7c7b\u578b\u7684\u80a1\u7968\"\"\" self . _exclude_kcb = True self . _only_kcb = False return self def include_exit ( self ) -> \"Query\" : \"\"\"\u4ece\u8fd4\u56de\u7ed3\u679c\u4e2d\u5305\u542b\u5df2\u9000\u5e02\u7684\u8bc1\u5238\"\"\" self . _include_exit = True return self def types ( self , types : List [ str ]) -> \"Query\" : \"\"\"\u9009\u62e9\u7c7b\u578b\u5728`types`\u4e2d\u7684\u8bc1\u5238\u54c1\u79cd Args: types: \u6709\u6548\u7684\u7c7b\u578b\u5305\u62ec: \u5bf9\u80a1\u7968\u6307\u6570\u800c\u8a00\u662f\uff08'index', 'stock'\uff09\uff0c\u5bf9\u57fa\u91d1\u800c\u8a00\u5219\u662f\uff08'etf', 'fjb', 'mmf', 'reits', 'fja', 'fjm', 'lof'\uff09 \"\"\" if types is None or isinstance ( types , List ) is False : return self if len ( types ) == 0 : self . _type_pattern = [ \"index\" , \"stock\" ] else : tmp = set ( types ) self . _type_pattern = list ( tmp ) return self def name_like ( self , name : str ) -> \"Query\" : \"\"\"\u67e5\u627e\u80a1\u7968/\u8bc1\u5238\u540d\u79f0\u4e2d\u51fa\u73b0`name`\u7684\u54c1\u79cd \u6ce8\u610f\u8fd9\u91cc\u7684\u8bc1\u5238\u540d\u79f0\u5e76\u4e0d\u662f\u5176\u663e\u793a\u540d\u3002\u6bd4\u5982\u5bf9\u4e2d\u56fd\u5e73\u5b89000001.XSHE\u6765\u8bf4\uff0c\u5b83\u7684\u540d\u79f0\u662fZGPA\uff0c\u800c\u4e0d\u662f\u201c\u4e2d\u56fd\u5e73\u5b89\u201d\u3002 Args: name: \u5f85\u67e5\u627e\u7684\u540d\u5b57\uff0c\u6bd4\u5982\"ZGPA\" \"\"\" if name is None or len ( name ) == 0 : self . _name_pattern = None else : self . _name_pattern = name return self def alias_like ( self , display_name : str ) -> \"Query\" : \"\"\"\u67e5\u627e\u80a1\u7968/\u8bc1\u5238\u663e\u793a\u540d\u4e2d\u51fa\u73b0`display_name\u7684\u54c1\u79cd Args: display_name: \u663e\u793a\u540d\uff0c\u6bd4\u5982\u201c\u4e2d\u56fd\u5e73\u5b89\" \"\"\" if display_name is None or len ( display_name ) == 0 : self . _alias_pattern = None else : self . _alias_pattern = display_name return self async def eval ( self ) -> List [ str ]: \"\"\"\u5bf9\u67e5\u8be2\u7ed3\u679c\u8fdb\u884c\u6c42\u503c\uff0c\u8fd4\u56decode\u5217\u8868 Returns: \u4ee3\u7801\u5217\u8868 \"\"\" logger . debug ( \"eval, date: %s \" , self . target_date ) logger . debug ( \"eval, names and types: %s , %s , %s \" , self . _name_pattern , self . _alias_pattern , self . _type_pattern , ) logger . debug ( \"eval, exclude and include: %s , %s , %s , %s \" , self . _exclude_cyb , self . _exclude_st , self . _exclude_kcb , self . _include_exit , ) logger . 
debug ( \"eval, only: %s , %s , %s \" , self . _only_cyb , self . _only_st , self . _only_kcb ) date_in_cache = await cache . security . get ( \"security:latest_date\" ) if date_in_cache : # \u65e0\u6b64\u6570\u636e\u8bf4\u660eomega\u6709\u67d0\u4e9b\u95ee\u9898\uff0c\u4e0d\u5904\u7406 _date = arrow . get ( date_in_cache ) . date () else : now = datetime . datetime . now () _date = tf . day_shift ( now , 0 ) # \u786e\u5b9a\u6570\u636e\u6e90\uff0ccache\u4e3a\u5f53\u59298\u70b9\u4e4b\u540e\u83b7\u53d6\u7684\u6570\u636e\uff0c\u6570\u636e\u5e93\u5b58\u653e\u524d\u4e00\u65e5\u548c\u66f4\u65e9\u7684\u6570\u636e if not self . target_date or self . target_date >= _date : self . target_date = _date records = None if self . target_date == _date : # \u4ece\u5185\u5b58\u4e2d\u67e5\u627e\uff0c\u5982\u679c\u7f13\u5b58\u4e2d\u7684\u6570\u636e\u5df2\u66f4\u65b0\uff0c\u91cd\u65b0\u52a0\u8f7d\u5230\u5185\u5b58 secs = await cache . security . lrange ( \"security:all\" , 0 , - 1 ) if len ( secs ) != 0 : # using np.datetime64[s] records = np . array ( [ tuple ( x . split ( \",\" )) for x in secs ], dtype = security_info_dtype ) else : records = await Security . load_securities_from_db ( self . target_date ) if records is None : return None results = [] for record in records : if self . _type_pattern is not None : if record [ \"type\" ] not in self . _type_pattern : continue if self . _name_pattern is not None : if record [ \"name\" ] . find ( self . _name_pattern ) == - 1 : continue if self . _alias_pattern is not None : if record [ \"alias\" ] . find ( self . _alias_pattern ) == - 1 : continue # \u521b\u4e1a\u677f\uff0c\u79d1\u521b\u677f\uff0cST\u6682\u65f6\u9650\u5b9a\u4e3a\u80a1\u7968\u7c7b\u578b if self . _only_cyb : if record [ \"type\" ] != \"stock\" or not ( record [ \"code\" ][: 3 ] in ( \"300\" , \"301\" ) ): continue if self . _only_kcb : if ( record [ \"type\" ] != \"stock\" or record [ \"code\" ] . startswith ( \"688\" ) is False ): continue if self . _only_st : if record [ \"type\" ] != \"stock\" or record [ \"alias\" ] . find ( \"ST\" ) == - 1 : continue if self . _exclude_cyb : if record [ \"type\" ] == \"stock\" and record [ \"code\" ][: 3 ] in ( \"300\" , \"301\" ): continue if self . _exclude_st : if record [ \"type\" ] == \"stock\" and record [ \"alias\" ] . find ( \"ST\" ) != - 1 : continue if self . _exclude_kcb : if record [ \"type\" ] == \"stock\" and record [ \"code\" ] . startswith ( \"688\" ): continue # \u9000\u5e02\u6682\u4e0d\u9650\u5b9a\u662f\u5426\u4e3a\u80a1\u7968 if self . _include_exit is False : d1 = convert_nptime_to_datetime ( record [ \"end\" ]) . date () if d1 < self . target_date : continue results . append ( record [ \"code\" ]) # \u8fd4\u56de\u6240\u6709\u67e5\u8be2\u5230\u7684\u7ed3\u679c return results alias_like ( self , display_name ) \u00b6 \u67e5\u627e\u80a1\u7968/\u8bc1\u5238\u663e\u793a\u540d\u4e2d\u51fa\u73b0`display_name\u7684\u54c1\u79cd Parameters: Name Type Description Default display_name str \u663e\u793a\u540d\uff0c\u6bd4\u5982\u201c\u4e2d\u56fd\u5e73\u5b89\" required Source code in omicron/models/security.py def alias_like ( self , display_name : str ) -> \"Query\" : \"\"\"\u67e5\u627e\u80a1\u7968/\u8bc1\u5238\u663e\u793a\u540d\u4e2d\u51fa\u73b0`display_name\u7684\u54c1\u79cd Args: display_name: \u663e\u793a\u540d\uff0c\u6bd4\u5982\u201c\u4e2d\u56fd\u5e73\u5b89\" \"\"\" if display_name is None or len ( display_name ) == 0 : self . _alias_pattern = None else : self . 
_alias_pattern = display_name return self eval ( self ) async \u00b6 \u5bf9\u67e5\u8be2\u7ed3\u679c\u8fdb\u884c\u6c42\u503c\uff0c\u8fd4\u56decode\u5217\u8868 Returns: Type Description List[str] \u4ee3\u7801\u5217\u8868 Source code in omicron/models/security.py async def eval ( self ) -> List [ str ]: \"\"\"\u5bf9\u67e5\u8be2\u7ed3\u679c\u8fdb\u884c\u6c42\u503c\uff0c\u8fd4\u56decode\u5217\u8868 Returns: \u4ee3\u7801\u5217\u8868 \"\"\" logger . debug ( \"eval, date: %s \" , self . target_date ) logger . debug ( \"eval, names and types: %s , %s , %s \" , self . _name_pattern , self . _alias_pattern , self . _type_pattern , ) logger . debug ( \"eval, exclude and include: %s , %s , %s , %s \" , self . _exclude_cyb , self . _exclude_st , self . _exclude_kcb , self . _include_exit , ) logger . debug ( \"eval, only: %s , %s , %s \" , self . _only_cyb , self . _only_st , self . _only_kcb ) date_in_cache = await cache . security . get ( \"security:latest_date\" ) if date_in_cache : # \u65e0\u6b64\u6570\u636e\u8bf4\u660eomega\u6709\u67d0\u4e9b\u95ee\u9898\uff0c\u4e0d\u5904\u7406 _date = arrow . get ( date_in_cache ) . date () else : now = datetime . datetime . now () _date = tf . day_shift ( now , 0 ) # \u786e\u5b9a\u6570\u636e\u6e90\uff0ccache\u4e3a\u5f53\u59298\u70b9\u4e4b\u540e\u83b7\u53d6\u7684\u6570\u636e\uff0c\u6570\u636e\u5e93\u5b58\u653e\u524d\u4e00\u65e5\u548c\u66f4\u65e9\u7684\u6570\u636e if not self . target_date or self . target_date >= _date : self . target_date = _date records = None if self . target_date == _date : # \u4ece\u5185\u5b58\u4e2d\u67e5\u627e\uff0c\u5982\u679c\u7f13\u5b58\u4e2d\u7684\u6570\u636e\u5df2\u66f4\u65b0\uff0c\u91cd\u65b0\u52a0\u8f7d\u5230\u5185\u5b58 secs = await cache . security . lrange ( \"security:all\" , 0 , - 1 ) if len ( secs ) != 0 : # using np.datetime64[s] records = np . array ( [ tuple ( x . split ( \",\" )) for x in secs ], dtype = security_info_dtype ) else : records = await Security . load_securities_from_db ( self . target_date ) if records is None : return None results = [] for record in records : if self . _type_pattern is not None : if record [ \"type\" ] not in self . _type_pattern : continue if self . _name_pattern is not None : if record [ \"name\" ] . find ( self . _name_pattern ) == - 1 : continue if self . _alias_pattern is not None : if record [ \"alias\" ] . find ( self . _alias_pattern ) == - 1 : continue # \u521b\u4e1a\u677f\uff0c\u79d1\u521b\u677f\uff0cST\u6682\u65f6\u9650\u5b9a\u4e3a\u80a1\u7968\u7c7b\u578b if self . _only_cyb : if record [ \"type\" ] != \"stock\" or not ( record [ \"code\" ][: 3 ] in ( \"300\" , \"301\" ) ): continue if self . _only_kcb : if ( record [ \"type\" ] != \"stock\" or record [ \"code\" ] . startswith ( \"688\" ) is False ): continue if self . _only_st : if record [ \"type\" ] != \"stock\" or record [ \"alias\" ] . find ( \"ST\" ) == - 1 : continue if self . _exclude_cyb : if record [ \"type\" ] == \"stock\" and record [ \"code\" ][: 3 ] in ( \"300\" , \"301\" ): continue if self . _exclude_st : if record [ \"type\" ] == \"stock\" and record [ \"alias\" ] . find ( \"ST\" ) != - 1 : continue if self . _exclude_kcb : if record [ \"type\" ] == \"stock\" and record [ \"code\" ] . startswith ( \"688\" ): continue # \u9000\u5e02\u6682\u4e0d\u9650\u5b9a\u662f\u5426\u4e3a\u80a1\u7968 if self . _include_exit is False : d1 = convert_nptime_to_datetime ( record [ \"end\" ]) . date () if d1 < self . target_date : continue results . 
append ( record [ \"code\" ]) # \u8fd4\u56de\u6240\u6709\u67e5\u8be2\u5230\u7684\u7ed3\u679c return results exclude_cyb ( self ) \u00b6 \u4ece\u8fd4\u56de\u7ed3\u679c\u4e2d\u6392\u9664\u521b\u4e1a\u677f\u7c7b\u578b\u7684\u80a1\u7968 Source code in omicron/models/security.py def exclude_cyb ( self ) -> \"Query\" : \"\"\"\u4ece\u8fd4\u56de\u7ed3\u679c\u4e2d\u6392\u9664\u521b\u4e1a\u677f\u7c7b\u578b\u7684\u80a1\u7968\"\"\" self . _exclude_cyb = True self . _only_cyb = False return self exclude_kcb ( self ) \u00b6 \u4ece\u8fd4\u56de\u7ed3\u679c\u4e2d\u6392\u9664\u79d1\u521b\u677f\u7c7b\u578b\u7684\u80a1\u7968 Source code in omicron/models/security.py def exclude_kcb ( self ) -> \"Query\" : \"\"\"\u4ece\u8fd4\u56de\u7ed3\u679c\u4e2d\u6392\u9664\u79d1\u521b\u677f\u7c7b\u578b\u7684\u80a1\u7968\"\"\" self . _exclude_kcb = True self . _only_kcb = False return self exclude_st ( self ) \u00b6 \u4ece\u8fd4\u56de\u7ed3\u679c\u4e2d\u6392\u9664ST\u7c7b\u578b\u7684\u80a1\u7968 Source code in omicron/models/security.py def exclude_st ( self ) -> \"Query\" : \"\"\"\u4ece\u8fd4\u56de\u7ed3\u679c\u4e2d\u6392\u9664ST\u7c7b\u578b\u7684\u80a1\u7968\"\"\" self . _exclude_st = True self . _only_st = False return self include_exit ( self ) \u00b6 \u4ece\u8fd4\u56de\u7ed3\u679c\u4e2d\u5305\u542b\u5df2\u9000\u5e02\u7684\u8bc1\u5238 Source code in omicron/models/security.py def include_exit ( self ) -> \"Query\" : \"\"\"\u4ece\u8fd4\u56de\u7ed3\u679c\u4e2d\u5305\u542b\u5df2\u9000\u5e02\u7684\u8bc1\u5238\"\"\" self . _include_exit = True return self name_like ( self , name ) \u00b6 \u67e5\u627e\u80a1\u7968/\u8bc1\u5238\u540d\u79f0\u4e2d\u51fa\u73b0 name \u7684\u54c1\u79cd \u6ce8\u610f\u8fd9\u91cc\u7684\u8bc1\u5238\u540d\u79f0\u5e76\u4e0d\u662f\u5176\u663e\u793a\u540d\u3002\u6bd4\u5982\u5bf9\u4e2d\u56fd\u5e73\u5b89000001.XSHE\u6765\u8bf4\uff0c\u5b83\u7684\u540d\u79f0\u662fZGPA\uff0c\u800c\u4e0d\u662f\u201c\u4e2d\u56fd\u5e73\u5b89\u201d\u3002 Parameters: Name Type Description Default name str \u5f85\u67e5\u627e\u7684\u540d\u5b57\uff0c\u6bd4\u5982\"ZGPA\" required Source code in omicron/models/security.py def name_like ( self , name : str ) -> \"Query\" : \"\"\"\u67e5\u627e\u80a1\u7968/\u8bc1\u5238\u540d\u79f0\u4e2d\u51fa\u73b0`name`\u7684\u54c1\u79cd \u6ce8\u610f\u8fd9\u91cc\u7684\u8bc1\u5238\u540d\u79f0\u5e76\u4e0d\u662f\u5176\u663e\u793a\u540d\u3002\u6bd4\u5982\u5bf9\u4e2d\u56fd\u5e73\u5b89000001.XSHE\u6765\u8bf4\uff0c\u5b83\u7684\u540d\u79f0\u662fZGPA\uff0c\u800c\u4e0d\u662f\u201c\u4e2d\u56fd\u5e73\u5b89\u201d\u3002 Args: name: \u5f85\u67e5\u627e\u7684\u540d\u5b57\uff0c\u6bd4\u5982\"ZGPA\" \"\"\" if name is None or len ( name ) == 0 : self . _name_pattern = None else : self . _name_pattern = name return self only_cyb ( self ) \u00b6 \u8fd4\u56de\u7ed3\u679c\u4e2d\u53ea\u5305\u542b\u521b\u4e1a\u677f\u80a1\u7968 Source code in omicron/models/security.py def only_cyb ( self ) -> \"Query\" : \"\"\"\u8fd4\u56de\u7ed3\u679c\u4e2d\u53ea\u5305\u542b\u521b\u4e1a\u677f\u80a1\u7968\"\"\" self . _only_cyb = True # \u9ad8\u4f18\u5148\u7ea7 self . _exclude_cyb = False self . _only_kcb = False self . _only_st = False return self only_kcb ( self ) \u00b6 \u8fd4\u56de\u7ed3\u679c\u4e2d\u53ea\u5305\u542b\u79d1\u521b\u677f\u80a1\u7968 Source code in omicron/models/security.py def only_kcb ( self ) -> \"Query\" : \"\"\"\u8fd4\u56de\u7ed3\u679c\u4e2d\u53ea\u5305\u542b\u79d1\u521b\u677f\u80a1\u7968\"\"\" self . _only_kcb = True # \u9ad8\u4f18\u5148\u7ea7 self . _exclude_kcb = False self . _only_cyb = False self . 
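A point that is easy to miss in name_like() versus alias_like(): name matches the abbreviated (pinyin-style) security name such as "ZGPA", while alias matches the Chinese display name. A minimal sketch of both, assuming omicron.init() has already been awaited so the cached security list is available:

```python
from omicron.models.security import Security


async def find_by_name_or_alias():
    # match on the abbreviated name, e.g. "ZGPA"
    by_name = await Security.select().name_like("ZGPA").eval()
    # match on the display name instead
    by_alias = await Security.select().alias_like("平安").eval()
    return by_name, by_alias
```

Both chains resolve to a list of codes (or None when no security list is available for the target date).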
_only_st = False return self only_st ( self ) \u00b6 \u8fd4\u56de\u7ed3\u679c\u4e2d\u53ea\u5305\u542bST\u7c7b\u578b\u7684\u8bc1\u5238 Source code in omicron/models/security.py def only_st ( self ) -> \"Query\" : \"\"\"\u8fd4\u56de\u7ed3\u679c\u4e2d\u53ea\u5305\u542bST\u7c7b\u578b\u7684\u8bc1\u5238\"\"\" self . _only_st = True # \u9ad8\u4f18\u5148\u7ea7 self . _exclude_st = False self . _only_kcb = False self . _only_cyb = False return self types ( self , types ) \u00b6 \u9009\u62e9\u7c7b\u578b\u5728 types \u4e2d\u7684\u8bc1\u5238\u54c1\u79cd Parameters: Name Type Description Default types List[str] \u6709\u6548\u7684\u7c7b\u578b\u5305\u62ec: \u5bf9\u80a1\u7968\u6307\u6570\u800c\u8a00\u662f\uff08'index', 'stock'\uff09\uff0c\u5bf9\u57fa\u91d1\u800c\u8a00\u5219\u662f\uff08'etf', 'fjb', 'mmf', 'reits', 'fja', 'fjm', 'lof'\uff09 required Source code in omicron/models/security.py def types ( self , types : List [ str ]) -> \"Query\" : \"\"\"\u9009\u62e9\u7c7b\u578b\u5728`types`\u4e2d\u7684\u8bc1\u5238\u54c1\u79cd Args: types: \u6709\u6548\u7684\u7c7b\u578b\u5305\u62ec: \u5bf9\u80a1\u7968\u6307\u6570\u800c\u8a00\u662f\uff08'index', 'stock'\uff09\uff0c\u5bf9\u57fa\u91d1\u800c\u8a00\u5219\u662f\uff08'etf', 'fjb', 'mmf', 'reits', 'fja', 'fjm', 'lof'\uff09 \"\"\" if types is None or isinstance ( types , List ) is False : return self if len ( types ) == 0 : self . _type_pattern = [ \"index\" , \"stock\" ] else : tmp = set ( types ) self . _type_pattern = list ( tmp ) return self Security \u00b6 Source code in omicron/models/security.py class Security : _securities = [] _securities_date = None _security_types = set () _stocks = [] @classmethod async def init ( cls ): \"\"\"\u521d\u59cb\u5316Security. \u4e00\u822c\u800c\u8a00\uff0comicron\u7684\u4f7f\u7528\u8005\u65e0\u987b\u8c03\u7528\u6b64\u65b9\u6cd5\uff0c\u5b83\u4f1a\u5728omicron\u521d\u59cb\u5316\uff08\u901a\u8fc7`omicron.init`\uff09\u65f6\uff0c\u88ab\u81ea\u52a8\u8c03\u7528\u3002 Raises: DataNotReadyError: \u5982\u679comicron\u672a\u521d\u59cb\u5316\uff0c\u6216\u8005cache\u4e2d\u672a\u52a0\u8f7d\u6700\u65b0\u8bc1\u5238\u5217\u8868\uff0c\u5219\u629b\u51fa\u6b64\u5f02\u5e38\u3002 \"\"\" # read all securities from redis, 7111 records now # {'index', 'stock'} # {'fjb', 'mmf', 'reits', 'fja', 'fjm'} # {'etf', 'lof'} if len ( cls . _securities ) > 100 : return True secs = await cls . load_securities () if secs is None or len ( secs ) == 0 : # pragma: no cover raise DataNotReadyError ( \"No securities in cache, make sure you have called omicron.init() first.\" ) print ( \"init securities done\" ) return True @classmethod async def load_securities ( cls ): \"\"\"\u52a0\u8f7d\u6240\u6709\u8bc1\u5238\u7684\u4fe1\u606f\uff0c\u5e76\u7f13\u5b58\u5230\u5185\u5b58\u4e2d \u4e00\u822c\u800c\u8a00\uff0comicron\u7684\u4f7f\u7528\u8005\u65e0\u987b\u8c03\u7528\u6b64\u65b9\u6cd5\uff0c\u5b83\u4f1a\u5728omicron\u521d\u59cb\u5316\uff08\u901a\u8fc7`omicron.init`\uff09\u65f6\uff0c\u88ab\u81ea\u52a8\u8c03\u7528\u3002 \"\"\" secs = await cache . security . lrange ( \"security:all\" , 0 , - 1 ) if len ( secs ) != 0 : # using np.datetime64[s] _securities = np . array ( [ tuple ( x . split ( \",\" )) for x in secs ], dtype = security_info_dtype ) # \u66f4\u65b0\u8bc1\u5238\u7c7b\u578b\u5217\u8868 cls . _securities = _securities cls . _security_types = set ( _securities [ \"type\" ]) cls . _stocks = _securities [ ( _securities [ \"type\" ] == \"stock\" ) | ( _securities [ \"type\" ] == \"index\" ) ] logger . 
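Because every filter returns the Query itself, the intended pattern is one chain that starts at Security.select() and ends with eval(). A minimal sketch, assuming omicron.init() has already been awaited; the date is illustrative:

```python
import datetime

from omicron.models.security import Security


async def main_board_stocks(day: datetime.date = datetime.date(2023, 10, 30)):
    # stocks only, dropping ST names, ChiNext (300/301) and STAR Market (688) codes
    return await (
        Security.select(day)
        .types(["stock"])
        .exclude_st()
        .exclude_cyb()
        .exclude_kcb()
        .eval()
    )
```

If the requested date is today's (or later), eval() falls back to the Redis cache; earlier dates are served from the security_list measurement in InfluxDB.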
info ( \" %d securities loaded, types: %s \" , len ( _securities ), cls . _security_types ) date_in_cache = await cache . security . get ( \"security:latest_date\" ) if date_in_cache is not None : cls . _securities_date = arrow . get ( date_in_cache ) . date () else : cls . _securities_date = datetime . date . today () return _securities else : # pragma: no cover return None @classmethod async def get_security_types ( cls ): if cls . _security_types : return list ( cls . _security_types ) else : return None @classmethod def get_stock ( cls , code ) -> NDArray [ security_info_dtype ]: \"\"\"\u6839\u636e`code`\u6765\u67e5\u627e\u5bf9\u5e94\u7684\u80a1\u7968\uff08\u542b\u6307\u6570\uff09\u5bf9\u8c61\u4fe1\u606f\u3002 \u5982\u679c\u60a8\u53ea\u6709\u80a1\u7968\u4ee3\u7801\uff0c\u60f3\u77e5\u9053\u8be5\u4ee3\u7801\u5bf9\u5e94\u7684\u80a1\u7968\u540d\u79f0\u3001\u522b\u540d\uff08\u663e\u793a\u540d\uff09\u3001\u4e0a\u5e02\u65e5\u671f\u7b49\u4fe1\u606f\uff0c\u5c31\u53ef\u4ee5\u4f7f\u7528\u6b64\u65b9\u6cd5\u6765\u83b7\u53d6\u76f8\u5173\u4fe1\u606f\u3002 \u8fd4\u56de\u7c7b\u578b\u4e3a`security_info_dtype`\u7684numpy\u6570\u7ec4\uff0c\u4f46\u4ec5\u5305\u542b\u4e00\u4e2a\u5143\u7d20\u3002\u60a8\u53ef\u4ee5\u8c61\u5b57\u5178\u4e00\u6837\u5b58\u53d6\u5b83\uff0c\u6bd4\u5982 ```python item = Security.get_stock(\"000001.XSHE\") print(item[\"alias\"]) ``` \u663e\u793a\u4e3a\"\u5e73\u5b89\u94f6\u884c\" Args: code: \u5f85\u67e5\u8be2\u7684\u80a1\u7968/\u6307\u6570\u4ee3\u7801 Returns: \u7c7b\u578b\u4e3a`security_info_dtype`\u7684numpy\u6570\u7ec4\uff0c\u4f46\u4ec5\u5305\u542b\u4e00\u4e2a\u5143\u7d20 \"\"\" if len ( cls . _securities ) == 0 : return None tmp = cls . _securities [ cls . _securities [ \"code\" ] == code ] if len ( tmp ) > 0 : if tmp [ \"type\" ] in [ \"stock\" , \"index\" ]: return tmp [ 0 ] return None @classmethod def fuzzy_match_ex ( cls , query : str ) -> Dict [ str , Tuple ]: # fixme: \u6b64\u65b9\u6cd5\u4e0eStock.fuzzy_match\u91cd\u590d\uff0c\u5e76\u4e14\u8fdb\u884c\u4e86\u7c7b\u578b\u9650\u5236\uff0c\u4f7f\u5f97\u5176\u4e0d\u9002\u5408\u653e\u5728Security\u91cc\uff0c\u4ee5\u53ca\u4f5c\u4e3a\u4e00\u4e2a\u901a\u7528\u65b9\u6cd5 query = query . upper () if re . match ( r \"\\d+\" , query ): return { sec [ \"code\" ]: sec . tolist () for sec in cls . _securities if sec [ \"code\" ] . find ( query ) != - 1 and sec [ \"type\" ] == \"stock\" } elif re . match ( r \"[A-Z]+\" , query ): return { sec [ \"code\" ]: sec . tolist () for sec in cls . _securities if sec [ \"name\" ] . startswith ( query ) and sec [ \"type\" ] == \"stock\" } else : return { sec [ \"code\" ]: sec . tolist () for sec in cls . _securities if sec [ \"alias\" ] . find ( query ) != - 1 and sec [ \"type\" ] == \"stock\" } @classmethod async def info ( cls , code , date = None ): _obj = await cls . query_security_via_date ( code , date ) if _obj is None : return None # \"_time\", \"code\", \"type\", \"alias\", \"end\", \"ipo\", \"name\" d1 = convert_nptime_to_datetime ( _obj [ \"ipo\" ]) . date () d2 = convert_nptime_to_datetime ( _obj [ \"end\" ]) . date () return { \"type\" : _obj [ \"type\" ], \"display_name\" : _obj [ \"alias\" ], \"alias\" : _obj [ \"alias\" ], \"end\" : d2 , \"start\" : d1 , \"name\" : _obj [ \"name\" ], } @classmethod async def name ( cls , code , date = None ): _security = await cls . query_security_via_date ( code , date ) if _security is None : return None return _security [ \"name\" ] @classmethod async def alias ( cls , code , date = None ): return await cls . 
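Extending the docstring's get_stock() example: get_stock() is a synchronous lookup against the in-memory array, while info() is async and resolves through query_security_via_date(). A minimal sketch using the same code as the docstring:

```python
from omicron.models.security import Security


async def describe(code: str = "000001.XSHE"):
    rec = Security.get_stock(code)        # one security_info_dtype record, or None
    if rec is not None:
        print(rec["alias"], rec["ipo"], rec["end"])

    meta = await Security.info(code)      # dict with type/display_name/alias/start/end/name
    if meta is not None:
        print(meta["display_name"], meta["start"], meta["end"])
```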
display_name ( code , date ) @classmethod async def display_name ( cls , code , date = None ): _security = await cls . query_security_via_date ( code , date ) if _security is None : return None return _security [ \"alias\" ] @classmethod async def start_date ( cls , code , date = None ): _security = await cls . query_security_via_date ( code , date ) if _security is None : return None return convert_nptime_to_datetime ( _security [ \"ipo\" ]) . date () @classmethod async def end_date ( cls , code , date = None ): _security = await cls . query_security_via_date ( code , date ) if _security is None : return None return convert_nptime_to_datetime ( _security [ \"end\" ]) . date () @classmethod async def security_type ( cls , code , date = None ) -> SecurityType : _security = await cls . query_security_via_date ( code , date ) if _security is None : return None return _security [ \"type\" ] @classmethod async def query_security_via_date ( cls , code : str , date : datetime . date = None ): if date is None : # \u4ece\u5185\u5b58\u4e2d\u67e5\u627e\uff0c\u5982\u679c\u7f13\u5b58\u4e2d\u7684\u6570\u636e\u5df2\u66f4\u65b0\uff0c\u91cd\u65b0\u52a0\u8f7d\u5230\u5185\u5b58 date_in_cache = await cache . security . get ( \"security:latest_date\" ) if date_in_cache is not None : date = arrow . get ( date_in_cache ) . date () if date > cls . _securities_date : await cls . load_securities () results = cls . _securities [ cls . _securities [ \"code\" ] == code ] else : # \u4eceinfluxdb\u67e5\u627e date = tf . day_shift ( date , 0 ) results = await cls . load_securities_from_db ( date , code ) if results is not None and len ( results ) > 0 : return results [ 0 ] else : return None @classmethod def select ( cls , date : datetime . date = None ) -> Query : if date is None : return Query ( target_date = None ) else : return Query ( target_date = date ) @classmethod async def update_secs_cache ( cls , dt : datetime . date , securities : List [ Tuple ]): \"\"\"\u66f4\u65b0\u8bc1\u5238\u5217\u8868\u5230\u7f13\u5b58\u6570\u636e\u5e93\u4e2d Args: dt: \u8bc1\u5238\u5217\u8868\u5f52\u5c5e\u7684\u65e5\u671f securities: \u8bc1\u5238\u5217\u8868, \u5143\u7d20\u4e3a\u5143\u7ec4\uff0c\u5206\u522b\u4e3a\u4ee3\u7801\u3001\u522b\u540d\u3001\u540d\u79f0\u3001IPO\u65e5\u671f\u3001\u9000\u5e02\u65e5\u548c\u8bc1\u5238\u7c7b\u578b \"\"\" # stock: {'index', 'stock'} # funds: {'fjb', 'mmf', 'reits', 'fja', 'fjm'} # {'etf', 'lof'} key = \"security:all\" pipeline = cache . security . pipeline () pipeline . delete ( key ) for code , alias , name , start , end , _type in securities : pipeline . rpush ( key , f \" { code } , { alias } , { name } , { start } ,\" f \" { end } , { _type } \" ) await pipeline . execute () logger . info ( \"all securities saved to cache %s , %d secs\" , key , len ( securities )) # update latest date info await cache . security . set ( \"security:latest_date\" , dt . strftime ( \"%Y-%m- %d \" )) @classmethod async def save_securities ( cls , securities : List [ str ], dt : datetime . date ): \"\"\"\u4fdd\u5b58\u6307\u5b9a\u7684\u8bc1\u5238\u4fe1\u606f\u5230\u7f13\u5b58\u4e2d\uff0c\u5e76\u4e14\u5b58\u5165influxdb\uff0c\u5b9a\u65f6job\u8c03\u7528\u672c\u63a5\u53e3 Args: securities: \u8bc1\u5238\u4ee3\u7801\u5217\u8868\u3002 \"\"\" # stock: {'index', 'stock'} # funds: {'fjb', 'mmf', 'reits', 'fja', 'fjm'} # {'etf', 'lof'} if dt is None or len ( securities ) == 0 : return measurement = \"security_list\" client = get_influx_client () # code, alias, name, start, end, type security_list = np . 
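update_secs_cache() expects one tuple per security in the order unpacked above: code, alias (display name), abbreviated name, IPO date, delisting date and type. The values are pushed to Redis verbatim, so the date strings below are only an assumption about the upstream format:

```python
import datetime

from omicron.models.security import Security


async def refresh_cache():
    dt = datetime.date(2023, 10, 30)
    secs = [
        # (code, alias, name, ipo, end, type); the date strings are illustrative
        ("000001.XSHE", "平安银行", "PAYH", "1991-04-03", "2200-01-01", "stock"),
    ]
    await Security.update_secs_cache(dt, secs)
```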
array ( [ ( dt , x [ 0 ], f \" { x [ 0 ] } , { x [ 1 ] } , { x [ 2 ] } , { x [ 3 ] } , { x [ 4 ] } , { x [ 5 ] } \" ) for x in securities ], dtype = security_db_dtype , ) await client . save ( security_list , measurement , time_key = \"frame\" , tag_keys = [ \"code\" ] ) @classmethod async def load_securities_from_db ( cls , target_date : datetime . date , code : str = None ): if target_date is None : return None client = get_influx_client () measurement = \"security_list\" flux = ( Flux () . measurement ( measurement ) . range ( target_date , target_date ) . bucket ( client . _bucket ) . fields ([ \"info\" ]) ) if code is not None and len ( code ) > 0 : flux . tags ({ \"code\" : code }) data = await client . query ( flux ) if len ( data ) == 2 : # \\r\\n return None ds = DataframeDeserializer ( sort_values = \"_time\" , usecols = [ \"_time\" , \"code\" , \"info\" ], time_col = \"_time\" , engine = \"c\" , ) actual = ds ( data ) secs = actual . to_records ( index = False ) if len ( secs ) != 0 : # \"_time\", \"code\", \"code, alias, name, start, end, type\" _securities = np . array ( [ tuple ( x [ \"info\" ] . split ( \",\" )) for x in secs ], dtype = security_info_dtype ) return _securities else : return None @classmethod async def get_datescope_from_db ( cls ): # fixme: \u51fd\u6570\u540d\u65e0\u6cd5\u53cd\u6620\u7528\u9014\uff0c\u9700\u8981\u589e\u52a0\u6587\u6863\u6ce8\u91ca\uff0c\u8bf4\u660e\u8be5\u51fd\u6570\u7684\u4f5c\u7528,\u6216\u8005\u4e0d\u5e94\u8be5\u51fa\u73b0\u5728\u6b64\u7c7b\u4e2d\uff1f client = get_influx_client () measurement = \"security_list\" date1 = arrow . get ( \"2005-01-01\" ) . date () date2 = arrow . now () . naive . date () flux = ( Flux () . measurement ( measurement ) . range ( date1 , date2 ) . bucket ( client . _bucket ) . tags ({ \"code\" : \"000001.XSHE\" }) ) data = await client . query ( flux ) if len ( data ) == 2 : # \\r\\n return None , None ds = DataframeDeserializer ( sort_values = \"_time\" , usecols = [ \"_time\" ], time_col = \"_time\" , engine = \"c\" ) actual = ds ( data ) secs = actual . to_records ( index = False ) if len ( secs ) != 0 : d1 = convert_nptime_to_datetime ( secs [ 0 ][ \"_time\" ]) d2 = convert_nptime_to_datetime ( secs [ len ( secs ) - 1 ][ \"_time\" ]) return d1 . date (), d2 . date () else : return None , None @classmethod async def _notify_special_bonusnote ( cls , code , note , cancel_date ): # fixme: \u8fd9\u4e2a\u51fd\u6570\u5e94\u8be5\u51fa\u73b0\u5728omega\u4e2d\uff1f default_cancel_date = datetime . date ( 2099 , 1 , 1 ) # \u9ed8\u8ba4\u65e0\u53d6\u6d88\u516c\u544a # report this special event to notify user if cancel_date != default_cancel_date : ding ( \"security %s , bonus_cancel_pub_date %s \" % ( code , cancel_date )) if note . find ( \"\u6d41\u901a\" ) != - 1 : # \u68c0\u67e5\u662f\u5426\u6709\u201c\u6d41\u901a\u80a1\u201d\u6587\u5b57 ding ( \"security %s , special xrxd note: %s \" % ( code , note )) @classmethod async def save_xrxd_reports ( cls , reports : List [ str ], dt : datetime . date ): # fixme: \u6b64\u51fd\u6570\u5e94\u8be5\u5c5e\u4e8eomega? 
\"\"\"\u4fdd\u5b581\u5e74\u5185\u7684\u5206\u7ea2\u9001\u80a1\u4fe1\u606f\uff0c\u5e76\u4e14\u5b58\u5165influxdb\uff0c\u5b9a\u65f6job\u8c03\u7528\u672c\u63a5\u53e3 Args: reports: \u5206\u7ea2\u9001\u80a1\u516c\u544a \"\"\" # code(0), a_xr_date, board_plan_bonusnote, bonus_ratio_rmb(3), dividend_ratio, transfer_ratio(5), # at_bonus_ratio_rmb(6), report_date, plan_progress, implementation_bonusnote, bonus_cancel_pub_date(10) if len ( reports ) == 0 or dt is None : return # read reports from db and convert to dict map reports_in_db = {} dt_start = dt - datetime . timedelta ( days = 366 ) # \u5f80\u524d\u56de\u6eaf366\u5929 dt_end = dt + datetime . timedelta ( days = 366 ) # \u5f80\u540e\u5ef6\u957f366\u5929 existing_records = await cls . _load_xrxd_from_db ( None , dt_start , dt_end ) for record in existing_records : code = record [ 0 ] if code not in reports_in_db : reports_in_db [ code ] = [ record ] else : reports_in_db [ code ] . append ( record ) records = [] # \u51c6\u5907\u5199\u5165db for x in reports : code = x [ 0 ] note = x [ 2 ] cancel_date = x [ 10 ] existing_items = reports_in_db . get ( code , None ) if existing_items is None : # \u65b0\u8bb0\u5f55 record = ( x [ 1 ], x [ 0 ], f \" { x [ 0 ] } | { x [ 1 ] } | { x [ 2 ] } | { x [ 3 ] } | { x [ 4 ] } | { x [ 5 ] } | { x [ 6 ] } | { x [ 7 ] } | { x [ 8 ] } | { x [ 9 ] } | { x [ 10 ] } \" , ) records . append ( record ) await cls . _notify_special_bonusnote ( code , note , cancel_date ) else : new_record = True for item in existing_items : existing_date = convert_nptime_to_datetime ( item [ 1 ]) . date () if existing_date == x [ 1 ]: # \u5982\u679cxr_date\u76f8\u540c\uff0c\u4e0d\u66f4\u65b0 new_record = False continue if new_record : record = ( x [ 1 ], x [ 0 ], f \" { x [ 0 ] } | { x [ 1 ] } | { x [ 2 ] } | { x [ 3 ] } | { x [ 4 ] } | { x [ 5 ] } | { x [ 6 ] } | { x [ 7 ] } | { x [ 8 ] } | { x [ 9 ] } | { x [ 10 ] } \" , ) records . append ( record ) await cls . _notify_special_bonusnote ( code , note , cancel_date ) logger . info ( \"save_xrxd_reports, %d records to be saved\" , len ( records )) if len ( records ) == 0 : return measurement = \"security_xrxd_reports\" client = get_influx_client () # a_xr_date(_time), code(tag), info report_list = np . array ( records , dtype = security_db_dtype ) await client . save ( report_list , measurement , time_key = \"frame\" , tag_keys = [ \"code\" ]) @classmethod async def _load_xrxd_from_db ( cls , code , dt_start : datetime . date , dt_end : datetime . date ): if dt_start is None or dt_end is None : return [] client = get_influx_client () measurement = \"security_xrxd_reports\" flux = ( Flux () . measurement ( measurement ) . range ( dt_start , dt_end ) . bucket ( client . _bucket ) . fields ([ \"info\" ]) ) if code is not None and len ( code ) > 0 : flux . tags ({ \"code\" : code }) data = await client . query ( flux ) if len ( data ) == 2 : # \\r\\n return [] ds = DataframeDeserializer ( sort_values = \"_time\" , usecols = [ \"_time\" , \"code\" , \"info\" ], time_col = \"_time\" , engine = \"c\" , ) actual = ds ( data ) secs = actual . to_records ( index = False ) if len ( secs ) != 0 : _reports = np . array ( [ tuple ( x [ \"info\" ] . split ( \"|\" )) for x in secs ], dtype = xrxd_info_dtype ) return _reports else : return [] @classmethod async def get_xrxd_info ( cls , dt : datetime . 
date , code : str = None ): if dt is None : return None # code(0), a_xr_date, board_plan_bonusnote, bonus_ratio_rmb(3), dividend_ratio, transfer_ratio(5), # at_bonus_ratio_rmb(6), report_date, plan_progress, implementation_bonusnote, bonus_cancel_pub_date(10) reports = await cls . _load_xrxd_from_db ( code , dt , dt ) if len ( reports ) == 0 : return None readable_reports = [] for report in reports : xr_date = convert_nptime_to_datetime ( report [ 1 ]) . date () readable_reports . append ( { \"code\" : report [ 0 ], \"xr_date\" : xr_date , \"bonus\" : report [ 3 ], \"dividend\" : report [ 4 ], \"transfer\" : report [ 5 ], \"bonusnote\" : report [ 2 ], } ) return readable_reports get_stock ( code ) classmethod \u00b6 \u6839\u636e code \u6765\u67e5\u627e\u5bf9\u5e94\u7684\u80a1\u7968\uff08\u542b\u6307\u6570\uff09\u5bf9\u8c61\u4fe1\u606f\u3002 \u5982\u679c\u60a8\u53ea\u6709\u80a1\u7968\u4ee3\u7801\uff0c\u60f3\u77e5\u9053\u8be5\u4ee3\u7801\u5bf9\u5e94\u7684\u80a1\u7968\u540d\u79f0\u3001\u522b\u540d\uff08\u663e\u793a\u540d\uff09\u3001\u4e0a\u5e02\u65e5\u671f\u7b49\u4fe1\u606f\uff0c\u5c31\u53ef\u4ee5\u4f7f\u7528\u6b64\u65b9\u6cd5\u6765\u83b7\u53d6\u76f8\u5173\u4fe1\u606f\u3002 \u8fd4\u56de\u7c7b\u578b\u4e3a security_info_dtype \u7684numpy\u6570\u7ec4\uff0c\u4f46\u4ec5\u5305\u542b\u4e00\u4e2a\u5143\u7d20\u3002\u60a8\u53ef\u4ee5\u8c61\u5b57\u5178\u4e00\u6837\u5b58\u53d6\u5b83\uff0c\u6bd4\u5982 1 2 item = Security . get_stock ( \"000001.XSHE\" ) print ( item [ \"alias\" ]) \u663e\u793a\u4e3a\"\u5e73\u5b89\u94f6\u884c\" Parameters: Name Type Description Default code \u5f85\u67e5\u8be2\u7684\u80a1\u7968/\u6307\u6570\u4ee3\u7801 required Returns: Type Description numpy.ndarray[Any, numpy.dtype[[('code', 'O'), ('alias', 'O'), ('name', 'O'), ('ipo', 'datetime64[s]'), ('end', 'datetime64[s]'), ('type', 'O')]]] \u7c7b\u578b\u4e3a security_info_dtype \u7684numpy\u6570\u7ec4\uff0c\u4f46\u4ec5\u5305\u542b\u4e00\u4e2a\u5143\u7d20 Source code in omicron/models/security.py @classmethod def get_stock ( cls , code ) -> NDArray [ security_info_dtype ]: \"\"\"\u6839\u636e`code`\u6765\u67e5\u627e\u5bf9\u5e94\u7684\u80a1\u7968\uff08\u542b\u6307\u6570\uff09\u5bf9\u8c61\u4fe1\u606f\u3002 \u5982\u679c\u60a8\u53ea\u6709\u80a1\u7968\u4ee3\u7801\uff0c\u60f3\u77e5\u9053\u8be5\u4ee3\u7801\u5bf9\u5e94\u7684\u80a1\u7968\u540d\u79f0\u3001\u522b\u540d\uff08\u663e\u793a\u540d\uff09\u3001\u4e0a\u5e02\u65e5\u671f\u7b49\u4fe1\u606f\uff0c\u5c31\u53ef\u4ee5\u4f7f\u7528\u6b64\u65b9\u6cd5\u6765\u83b7\u53d6\u76f8\u5173\u4fe1\u606f\u3002 \u8fd4\u56de\u7c7b\u578b\u4e3a`security_info_dtype`\u7684numpy\u6570\u7ec4\uff0c\u4f46\u4ec5\u5305\u542b\u4e00\u4e2a\u5143\u7d20\u3002\u60a8\u53ef\u4ee5\u8c61\u5b57\u5178\u4e00\u6837\u5b58\u53d6\u5b83\uff0c\u6bd4\u5982 ```python item = Security.get_stock(\"000001.XSHE\") print(item[\"alias\"]) ``` \u663e\u793a\u4e3a\"\u5e73\u5b89\u94f6\u884c\" Args: code: \u5f85\u67e5\u8be2\u7684\u80a1\u7968/\u6307\u6570\u4ee3\u7801 Returns: \u7c7b\u578b\u4e3a`security_info_dtype`\u7684numpy\u6570\u7ec4\uff0c\u4f46\u4ec5\u5305\u542b\u4e00\u4e2a\u5143\u7d20 \"\"\" if len ( cls . _securities ) == 0 : return None tmp = cls . _securities [ cls . _securities [ \"code\" ] == code ] if len ( tmp ) > 0 : if tmp [ \"type\" ] in [ \"stock\" , \"index\" ]: return tmp [ 0 ] return None init () async classmethod \u00b6 \u521d\u59cb\u5316Security. 
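get_xrxd_info() returns the ex-rights/ex-dividend records saved by save_xrxd_reports() as plain dicts. A minimal sketch of reading them back; the date and code are illustrative:

```python
import datetime

from omicron.models.security import Security


async def show_xrxd(day: datetime.date, code: str = "000001.XSHE"):
    reports = await Security.get_xrxd_info(day, code)
    for r in reports or []:
        # keys as assembled in get_xrxd_info
        print(r["code"], r["xr_date"], r["bonus"], r["dividend"], r["transfer"], r["bonusnote"])
```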
\u4e00\u822c\u800c\u8a00\uff0comicron\u7684\u4f7f\u7528\u8005\u65e0\u987b\u8c03\u7528\u6b64\u65b9\u6cd5\uff0c\u5b83\u4f1a\u5728omicron\u521d\u59cb\u5316\uff08\u901a\u8fc7 omicron.init \uff09\u65f6\uff0c\u88ab\u81ea\u52a8\u8c03\u7528\u3002 Exceptions: Type Description DataNotReadyError \u5982\u679comicron\u672a\u521d\u59cb\u5316\uff0c\u6216\u8005cache\u4e2d\u672a\u52a0\u8f7d\u6700\u65b0\u8bc1\u5238\u5217\u8868\uff0c\u5219\u629b\u51fa\u6b64\u5f02\u5e38\u3002 Source code in omicron/models/security.py @classmethod async def init ( cls ): \"\"\"\u521d\u59cb\u5316Security. \u4e00\u822c\u800c\u8a00\uff0comicron\u7684\u4f7f\u7528\u8005\u65e0\u987b\u8c03\u7528\u6b64\u65b9\u6cd5\uff0c\u5b83\u4f1a\u5728omicron\u521d\u59cb\u5316\uff08\u901a\u8fc7`omicron.init`\uff09\u65f6\uff0c\u88ab\u81ea\u52a8\u8c03\u7528\u3002 Raises: DataNotReadyError: \u5982\u679comicron\u672a\u521d\u59cb\u5316\uff0c\u6216\u8005cache\u4e2d\u672a\u52a0\u8f7d\u6700\u65b0\u8bc1\u5238\u5217\u8868\uff0c\u5219\u629b\u51fa\u6b64\u5f02\u5e38\u3002 \"\"\" # read all securities from redis, 7111 records now # {'index', 'stock'} # {'fjb', 'mmf', 'reits', 'fja', 'fjm'} # {'etf', 'lof'} if len ( cls . _securities ) > 100 : return True secs = await cls . load_securities () if secs is None or len ( secs ) == 0 : # pragma: no cover raise DataNotReadyError ( \"No securities in cache, make sure you have called omicron.init() first.\" ) print ( \"init securities done\" ) return True load_securities () async classmethod \u00b6 \u52a0\u8f7d\u6240\u6709\u8bc1\u5238\u7684\u4fe1\u606f\uff0c\u5e76\u7f13\u5b58\u5230\u5185\u5b58\u4e2d \u4e00\u822c\u800c\u8a00\uff0comicron\u7684\u4f7f\u7528\u8005\u65e0\u987b\u8c03\u7528\u6b64\u65b9\u6cd5\uff0c\u5b83\u4f1a\u5728omicron\u521d\u59cb\u5316\uff08\u901a\u8fc7 omicron.init \uff09\u65f6\uff0c\u88ab\u81ea\u52a8\u8c03\u7528\u3002 Source code in omicron/models/security.py @classmethod async def load_securities ( cls ): \"\"\"\u52a0\u8f7d\u6240\u6709\u8bc1\u5238\u7684\u4fe1\u606f\uff0c\u5e76\u7f13\u5b58\u5230\u5185\u5b58\u4e2d \u4e00\u822c\u800c\u8a00\uff0comicron\u7684\u4f7f\u7528\u8005\u65e0\u987b\u8c03\u7528\u6b64\u65b9\u6cd5\uff0c\u5b83\u4f1a\u5728omicron\u521d\u59cb\u5316\uff08\u901a\u8fc7`omicron.init`\uff09\u65f6\uff0c\u88ab\u81ea\u52a8\u8c03\u7528\u3002 \"\"\" secs = await cache . security . lrange ( \"security:all\" , 0 , - 1 ) if len ( secs ) != 0 : # using np.datetime64[s] _securities = np . array ( [ tuple ( x . split ( \",\" )) for x in secs ], dtype = security_info_dtype ) # \u66f4\u65b0\u8bc1\u5238\u7c7b\u578b\u5217\u8868 cls . _securities = _securities cls . _security_types = set ( _securities [ \"type\" ]) cls . _stocks = _securities [ ( _securities [ \"type\" ] == \"stock\" ) | ( _securities [ \"type\" ] == \"index\" ) ] logger . info ( \" %d securities loaded, types: %s \" , len ( _securities ), cls . _security_types ) date_in_cache = await cache . security . get ( \"security:latest_date\" ) if date_in_cache is not None : cls . _securities_date = arrow . get ( date_in_cache ) . date () else : cls . _securities_date = datetime . date . 
today () return _securities else : # pragma: no cover return None save_securities ( securities , dt ) async classmethod \u00b6 \u4fdd\u5b58\u6307\u5b9a\u7684\u8bc1\u5238\u4fe1\u606f\u5230\u7f13\u5b58\u4e2d\uff0c\u5e76\u4e14\u5b58\u5165influxdb\uff0c\u5b9a\u65f6job\u8c03\u7528\u672c\u63a5\u53e3 Parameters: Name Type Description Default securities List[str] \u8bc1\u5238\u4ee3\u7801\u5217\u8868\u3002 required Source code in omicron/models/security.py @classmethod async def save_securities ( cls , securities : List [ str ], dt : datetime . date ): \"\"\"\u4fdd\u5b58\u6307\u5b9a\u7684\u8bc1\u5238\u4fe1\u606f\u5230\u7f13\u5b58\u4e2d\uff0c\u5e76\u4e14\u5b58\u5165influxdb\uff0c\u5b9a\u65f6job\u8c03\u7528\u672c\u63a5\u53e3 Args: securities: \u8bc1\u5238\u4ee3\u7801\u5217\u8868\u3002 \"\"\" # stock: {'index', 'stock'} # funds: {'fjb', 'mmf', 'reits', 'fja', 'fjm'} # {'etf', 'lof'} if dt is None or len ( securities ) == 0 : return measurement = \"security_list\" client = get_influx_client () # code, alias, name, start, end, type security_list = np . array ( [ ( dt , x [ 0 ], f \" { x [ 0 ] } , { x [ 1 ] } , { x [ 2 ] } , { x [ 3 ] } , { x [ 4 ] } , { x [ 5 ] } \" ) for x in securities ], dtype = security_db_dtype , ) await client . save ( security_list , measurement , time_key = \"frame\" , tag_keys = [ \"code\" ] ) save_xrxd_reports ( reports , dt ) async classmethod \u00b6 \u4fdd\u5b581\u5e74\u5185\u7684\u5206\u7ea2\u9001\u80a1\u4fe1\u606f\uff0c\u5e76\u4e14\u5b58\u5165influxdb\uff0c\u5b9a\u65f6job\u8c03\u7528\u672c\u63a5\u53e3 Parameters: Name Type Description Default reports List[str] \u5206\u7ea2\u9001\u80a1\u516c\u544a required Source code in omicron/models/security.py @classmethod async def save_xrxd_reports ( cls , reports : List [ str ], dt : datetime . date ): # fixme: \u6b64\u51fd\u6570\u5e94\u8be5\u5c5e\u4e8eomega? \"\"\"\u4fdd\u5b581\u5e74\u5185\u7684\u5206\u7ea2\u9001\u80a1\u4fe1\u606f\uff0c\u5e76\u4e14\u5b58\u5165influxdb\uff0c\u5b9a\u65f6job\u8c03\u7528\u672c\u63a5\u53e3 Args: reports: \u5206\u7ea2\u9001\u80a1\u516c\u544a \"\"\" # code(0), a_xr_date, board_plan_bonusnote, bonus_ratio_rmb(3), dividend_ratio, transfer_ratio(5), # at_bonus_ratio_rmb(6), report_date, plan_progress, implementation_bonusnote, bonus_cancel_pub_date(10) if len ( reports ) == 0 or dt is None : return # read reports from db and convert to dict map reports_in_db = {} dt_start = dt - datetime . timedelta ( days = 366 ) # \u5f80\u524d\u56de\u6eaf366\u5929 dt_end = dt + datetime . timedelta ( days = 366 ) # \u5f80\u540e\u5ef6\u957f366\u5929 existing_records = await cls . _load_xrxd_from_db ( None , dt_start , dt_end ) for record in existing_records : code = record [ 0 ] if code not in reports_in_db : reports_in_db [ code ] = [ record ] else : reports_in_db [ code ] . append ( record ) records = [] # \u51c6\u5907\u5199\u5165db for x in reports : code = x [ 0 ] note = x [ 2 ] cancel_date = x [ 10 ] existing_items = reports_in_db . get ( code , None ) if existing_items is None : # \u65b0\u8bb0\u5f55 record = ( x [ 1 ], x [ 0 ], f \" { x [ 0 ] } | { x [ 1 ] } | { x [ 2 ] } | { x [ 3 ] } | { x [ 4 ] } | { x [ 5 ] } | { x [ 6 ] } | { x [ 7 ] } | { x [ 8 ] } | { x [ 9 ] } | { x [ 10 ] } \" , ) records . append ( record ) await cls . _notify_special_bonusnote ( code , note , cancel_date ) else : new_record = True for item in existing_items : existing_date = convert_nptime_to_datetime ( item [ 1 ]) . 
date () if existing_date == x [ 1 ]: # \u5982\u679cxr_date\u76f8\u540c\uff0c\u4e0d\u66f4\u65b0 new_record = False continue if new_record : record = ( x [ 1 ], x [ 0 ], f \" { x [ 0 ] } | { x [ 1 ] } | { x [ 2 ] } | { x [ 3 ] } | { x [ 4 ] } | { x [ 5 ] } | { x [ 6 ] } | { x [ 7 ] } | { x [ 8 ] } | { x [ 9 ] } | { x [ 10 ] } \" , ) records . append ( record ) await cls . _notify_special_bonusnote ( code , note , cancel_date ) logger . info ( \"save_xrxd_reports, %d records to be saved\" , len ( records )) if len ( records ) == 0 : return measurement = \"security_xrxd_reports\" client = get_influx_client () # a_xr_date(_time), code(tag), info report_list = np . array ( records , dtype = security_db_dtype ) await client . save ( report_list , measurement , time_key = \"frame\" , tag_keys = [ \"code\" ]) update_secs_cache ( dt , securities ) async classmethod \u00b6 \u66f4\u65b0\u8bc1\u5238\u5217\u8868\u5230\u7f13\u5b58\u6570\u636e\u5e93\u4e2d Parameters: Name Type Description Default dt date \u8bc1\u5238\u5217\u8868\u5f52\u5c5e\u7684\u65e5\u671f required securities List[Tuple] \u8bc1\u5238\u5217\u8868, \u5143\u7d20\u4e3a\u5143\u7ec4\uff0c\u5206\u522b\u4e3a\u4ee3\u7801\u3001\u522b\u540d\u3001\u540d\u79f0\u3001IPO\u65e5\u671f\u3001\u9000\u5e02\u65e5\u548c\u8bc1\u5238\u7c7b\u578b required Source code in omicron/models/security.py @classmethod async def update_secs_cache ( cls , dt : datetime . date , securities : List [ Tuple ]): \"\"\"\u66f4\u65b0\u8bc1\u5238\u5217\u8868\u5230\u7f13\u5b58\u6570\u636e\u5e93\u4e2d Args: dt: \u8bc1\u5238\u5217\u8868\u5f52\u5c5e\u7684\u65e5\u671f securities: \u8bc1\u5238\u5217\u8868, \u5143\u7d20\u4e3a\u5143\u7ec4\uff0c\u5206\u522b\u4e3a\u4ee3\u7801\u3001\u522b\u540d\u3001\u540d\u79f0\u3001IPO\u65e5\u671f\u3001\u9000\u5e02\u65e5\u548c\u8bc1\u5238\u7c7b\u578b \"\"\" # stock: {'index', 'stock'} # funds: {'fjb', 'mmf', 'reits', 'fja', 'fjm'} # {'etf', 'lof'} key = \"security:all\" pipeline = cache . security . pipeline () pipeline . delete ( key ) for code , alias , name , start , end , _type in securities : pipeline . rpush ( key , f \" { code } , { alias } , { name } , { start } ,\" f \" { end } , { _type } \" ) await pipeline . execute () logger . info ( \"all securities saved to cache %s , %d secs\" , key , len ( securities )) # update latest date info await cache . security . set ( \"security:latest_date\" , dt . strftime ( \"%Y-%m- %d \" ))","title":"security"},{"location":"api/security/#omicron.models.security.Query","text":"\u8bc1\u5238\u4fe1\u606f\u67e5\u8be2\u5bf9\u8c61 \u8bc1\u5238\u4fe1\u606f\u67e5\u8be2\u5bf9\u8c61\uff0c\u7531 Security.select() \u65b9\u6cd5\u751f\u6210\uff0c\u652f\u6301\u94fe\u5f0f\u67e5\u8be2\u3002\u901a\u8fc7 eval \u51fd\u6570\u7ed3\u675f\u94fe\u5f0f\u8c03\u7528\u5e76\u751f\u6210\u67e5\u8be2\u7ed3\u679c\u3002 Source code in omicron/models/security.py class Query : \"\"\"\u8bc1\u5238\u4fe1\u606f\u67e5\u8be2\u5bf9\u8c61 \u8bc1\u5238\u4fe1\u606f\u67e5\u8be2\u5bf9\u8c61\uff0c\u7531`Security.select()`\u65b9\u6cd5\u751f\u6210\uff0c\u652f\u6301\u94fe\u5f0f\u67e5\u8be2\u3002\u901a\u8fc7`eval`\u51fd\u6570\u7ed3\u675f\u94fe\u5f0f\u8c03\u7528\u5e76\u751f\u6210\u67e5\u8be2\u7ed3\u679c\u3002 \"\"\" def __init__ ( self , target_date : datetime . date = None ): if target_date is None : # \u805a\u5bbd\u4e0d\u4e00\u5b9a\u4f1a\u53ca\u65f6\u66f4\u65b0\u6570\u636e\uff0c\u56e0\u6b64db\u4e2d\u4e0d\u5b58\u653e\u5f53\u5929\u7684\u6570\u636e\uff0c\u5982\u679c\u4f20\u7a7a\uff0c\u67e5cache self . 
target_date = None else : # \u5982\u679c\u662f\u4ea4\u6613\u65e5\uff0c\u53d6\u5f53\u5929\uff0c\u5426\u5219\u53d6\u524d\u4e00\u5929 self . target_date = tf . day_shift ( target_date , 0 ) # \u540d\u5b57\uff0c\u663e\u793a\u540d\uff0c\u7c7b\u578b\u8fc7\u6ee4\u5668 self . _name_pattern = None # \u5b57\u6bcd\u540d\u5b57 self . _alias_pattern = None # \u663e\u793a\u540d self . _type_pattern = None # \u4e0d\u6307\u5b9a\u5219\u9ed8\u8ba4\u4e3a\u5168\u90e8\uff0c\u5982\u679c\u4f20\u5165\u7a7a\u503c\u5219\u53ea\u9009\u62e9\u80a1\u7968\u548c\u6307\u6570 # \u5f00\u5173\u9009\u9879 self . _exclude_kcb = False # \u79d1\u521b\u677f self . _exclude_cyb = False # \u521b\u4e1a\u677f self . _exclude_st = False # ST self . _include_exit = False # \u662f\u5426\u5305\u542b\u5df2\u9000\u5e02\u8bc1\u5238(\u9ed8\u8ba4\u4e0d\u5305\u62ec\u5f53\u5929\u9000\u5e02\u7684) # \u4e0b\u5217\u5f00\u5173\u4f18\u5148\u7ea7\u9ad8\u4e8e\u4e0a\u9762\u7684 self . _only_kcb = False self . _only_cyb = False self . _only_st = False def only_cyb ( self ) -> \"Query\" : \"\"\"\u8fd4\u56de\u7ed3\u679c\u4e2d\u53ea\u5305\u542b\u521b\u4e1a\u677f\u80a1\u7968\"\"\" self . _only_cyb = True # \u9ad8\u4f18\u5148\u7ea7 self . _exclude_cyb = False self . _only_kcb = False self . _only_st = False return self def only_st ( self ) -> \"Query\" : \"\"\"\u8fd4\u56de\u7ed3\u679c\u4e2d\u53ea\u5305\u542bST\u7c7b\u578b\u7684\u8bc1\u5238\"\"\" self . _only_st = True # \u9ad8\u4f18\u5148\u7ea7 self . _exclude_st = False self . _only_kcb = False self . _only_cyb = False return self def only_kcb ( self ) -> \"Query\" : \"\"\"\u8fd4\u56de\u7ed3\u679c\u4e2d\u53ea\u5305\u542b\u79d1\u521b\u677f\u80a1\u7968\"\"\" self . _only_kcb = True # \u9ad8\u4f18\u5148\u7ea7 self . _exclude_kcb = False self . _only_cyb = False self . _only_st = False return self def exclude_st ( self ) -> \"Query\" : \"\"\"\u4ece\u8fd4\u56de\u7ed3\u679c\u4e2d\u6392\u9664ST\u7c7b\u578b\u7684\u80a1\u7968\"\"\" self . _exclude_st = True self . _only_st = False return self def exclude_cyb ( self ) -> \"Query\" : \"\"\"\u4ece\u8fd4\u56de\u7ed3\u679c\u4e2d\u6392\u9664\u521b\u4e1a\u677f\u7c7b\u578b\u7684\u80a1\u7968\"\"\" self . _exclude_cyb = True self . _only_cyb = False return self def exclude_kcb ( self ) -> \"Query\" : \"\"\"\u4ece\u8fd4\u56de\u7ed3\u679c\u4e2d\u6392\u9664\u79d1\u521b\u677f\u7c7b\u578b\u7684\u80a1\u7968\"\"\" self . _exclude_kcb = True self . _only_kcb = False return self def include_exit ( self ) -> \"Query\" : \"\"\"\u4ece\u8fd4\u56de\u7ed3\u679c\u4e2d\u5305\u542b\u5df2\u9000\u5e02\u7684\u8bc1\u5238\"\"\" self . _include_exit = True return self def types ( self , types : List [ str ]) -> \"Query\" : \"\"\"\u9009\u62e9\u7c7b\u578b\u5728`types`\u4e2d\u7684\u8bc1\u5238\u54c1\u79cd Args: types: \u6709\u6548\u7684\u7c7b\u578b\u5305\u62ec: \u5bf9\u80a1\u7968\u6307\u6570\u800c\u8a00\u662f\uff08'index', 'stock'\uff09\uff0c\u5bf9\u57fa\u91d1\u800c\u8a00\u5219\u662f\uff08'etf', 'fjb', 'mmf', 'reits', 'fja', 'fjm', 'lof'\uff09 \"\"\" if types is None or isinstance ( types , List ) is False : return self if len ( types ) == 0 : self . _type_pattern = [ \"index\" , \"stock\" ] else : tmp = set ( types ) self . 
_type_pattern = list ( tmp ) return self def name_like ( self , name : str ) -> \"Query\" : \"\"\"\u67e5\u627e\u80a1\u7968/\u8bc1\u5238\u540d\u79f0\u4e2d\u51fa\u73b0`name`\u7684\u54c1\u79cd \u6ce8\u610f\u8fd9\u91cc\u7684\u8bc1\u5238\u540d\u79f0\u5e76\u4e0d\u662f\u5176\u663e\u793a\u540d\u3002\u6bd4\u5982\u5bf9\u4e2d\u56fd\u5e73\u5b89000001.XSHE\u6765\u8bf4\uff0c\u5b83\u7684\u540d\u79f0\u662fZGPA\uff0c\u800c\u4e0d\u662f\u201c\u4e2d\u56fd\u5e73\u5b89\u201d\u3002 Args: name: \u5f85\u67e5\u627e\u7684\u540d\u5b57\uff0c\u6bd4\u5982\"ZGPA\" \"\"\" if name is None or len ( name ) == 0 : self . _name_pattern = None else : self . _name_pattern = name return self def alias_like ( self , display_name : str ) -> \"Query\" : \"\"\"\u67e5\u627e\u80a1\u7968/\u8bc1\u5238\u663e\u793a\u540d\u4e2d\u51fa\u73b0`display_name\u7684\u54c1\u79cd Args: display_name: \u663e\u793a\u540d\uff0c\u6bd4\u5982\u201c\u4e2d\u56fd\u5e73\u5b89\" \"\"\" if display_name is None or len ( display_name ) == 0 : self . _alias_pattern = None else : self . _alias_pattern = display_name return self async def eval ( self ) -> List [ str ]: \"\"\"\u5bf9\u67e5\u8be2\u7ed3\u679c\u8fdb\u884c\u6c42\u503c\uff0c\u8fd4\u56decode\u5217\u8868 Returns: \u4ee3\u7801\u5217\u8868 \"\"\" logger . debug ( \"eval, date: %s \" , self . target_date ) logger . debug ( \"eval, names and types: %s , %s , %s \" , self . _name_pattern , self . _alias_pattern , self . _type_pattern , ) logger . debug ( \"eval, exclude and include: %s , %s , %s , %s \" , self . _exclude_cyb , self . _exclude_st , self . _exclude_kcb , self . _include_exit , ) logger . debug ( \"eval, only: %s , %s , %s \" , self . _only_cyb , self . _only_st , self . _only_kcb ) date_in_cache = await cache . security . get ( \"security:latest_date\" ) if date_in_cache : # \u65e0\u6b64\u6570\u636e\u8bf4\u660eomega\u6709\u67d0\u4e9b\u95ee\u9898\uff0c\u4e0d\u5904\u7406 _date = arrow . get ( date_in_cache ) . date () else : now = datetime . datetime . now () _date = tf . day_shift ( now , 0 ) # \u786e\u5b9a\u6570\u636e\u6e90\uff0ccache\u4e3a\u5f53\u59298\u70b9\u4e4b\u540e\u83b7\u53d6\u7684\u6570\u636e\uff0c\u6570\u636e\u5e93\u5b58\u653e\u524d\u4e00\u65e5\u548c\u66f4\u65e9\u7684\u6570\u636e if not self . target_date or self . target_date >= _date : self . target_date = _date records = None if self . target_date == _date : # \u4ece\u5185\u5b58\u4e2d\u67e5\u627e\uff0c\u5982\u679c\u7f13\u5b58\u4e2d\u7684\u6570\u636e\u5df2\u66f4\u65b0\uff0c\u91cd\u65b0\u52a0\u8f7d\u5230\u5185\u5b58 secs = await cache . security . lrange ( \"security:all\" , 0 , - 1 ) if len ( secs ) != 0 : # using np.datetime64[s] records = np . array ( [ tuple ( x . split ( \",\" )) for x in secs ], dtype = security_info_dtype ) else : records = await Security . load_securities_from_db ( self . target_date ) if records is None : return None results = [] for record in records : if self . _type_pattern is not None : if record [ \"type\" ] not in self . _type_pattern : continue if self . _name_pattern is not None : if record [ \"name\" ] . find ( self . _name_pattern ) == - 1 : continue if self . _alias_pattern is not None : if record [ \"alias\" ] . find ( self . _alias_pattern ) == - 1 : continue # \u521b\u4e1a\u677f\uff0c\u79d1\u521b\u677f\uff0cST\u6682\u65f6\u9650\u5b9a\u4e3a\u80a1\u7968\u7c7b\u578b if self . _only_cyb : if record [ \"type\" ] != \"stock\" or not ( record [ \"code\" ][: 3 ] in ( \"300\" , \"301\" ) ): continue if self . _only_kcb : if ( record [ \"type\" ] != \"stock\" or record [ \"code\" ] . 
startswith ( \"688\" ) is False ): continue if self . _only_st : if record [ \"type\" ] != \"stock\" or record [ \"alias\" ] . find ( \"ST\" ) == - 1 : continue if self . _exclude_cyb : if record [ \"type\" ] == \"stock\" and record [ \"code\" ][: 3 ] in ( \"300\" , \"301\" ): continue if self . _exclude_st : if record [ \"type\" ] == \"stock\" and record [ \"alias\" ] . find ( \"ST\" ) != - 1 : continue if self . _exclude_kcb : if record [ \"type\" ] == \"stock\" and record [ \"code\" ] . startswith ( \"688\" ): continue # \u9000\u5e02\u6682\u4e0d\u9650\u5b9a\u662f\u5426\u4e3a\u80a1\u7968 if self . _include_exit is False : d1 = convert_nptime_to_datetime ( record [ \"end\" ]) . date () if d1 < self . target_date : continue results . append ( record [ \"code\" ]) # \u8fd4\u56de\u6240\u6709\u67e5\u8be2\u5230\u7684\u7ed3\u679c return results","title":"Query"},{"location":"api/security/#omicron.models.security.Query.alias_like","text":"\u67e5\u627e\u80a1\u7968/\u8bc1\u5238\u663e\u793a\u540d\u4e2d\u51fa\u73b0`display_name\u7684\u54c1\u79cd Parameters: Name Type Description Default display_name str \u663e\u793a\u540d\uff0c\u6bd4\u5982\u201c\u4e2d\u56fd\u5e73\u5b89\" required Source code in omicron/models/security.py def alias_like ( self , display_name : str ) -> \"Query\" : \"\"\"\u67e5\u627e\u80a1\u7968/\u8bc1\u5238\u663e\u793a\u540d\u4e2d\u51fa\u73b0`display_name\u7684\u54c1\u79cd Args: display_name: \u663e\u793a\u540d\uff0c\u6bd4\u5982\u201c\u4e2d\u56fd\u5e73\u5b89\" \"\"\" if display_name is None or len ( display_name ) == 0 : self . _alias_pattern = None else : self . _alias_pattern = display_name return self","title":"alias_like()"},{"location":"api/security/#omicron.models.security.Query.eval","text":"\u5bf9\u67e5\u8be2\u7ed3\u679c\u8fdb\u884c\u6c42\u503c\uff0c\u8fd4\u56decode\u5217\u8868 Returns: Type Description List[str] \u4ee3\u7801\u5217\u8868 Source code in omicron/models/security.py async def eval ( self ) -> List [ str ]: \"\"\"\u5bf9\u67e5\u8be2\u7ed3\u679c\u8fdb\u884c\u6c42\u503c\uff0c\u8fd4\u56decode\u5217\u8868 Returns: \u4ee3\u7801\u5217\u8868 \"\"\" logger . debug ( \"eval, date: %s \" , self . target_date ) logger . debug ( \"eval, names and types: %s , %s , %s \" , self . _name_pattern , self . _alias_pattern , self . _type_pattern , ) logger . debug ( \"eval, exclude and include: %s , %s , %s , %s \" , self . _exclude_cyb , self . _exclude_st , self . _exclude_kcb , self . _include_exit , ) logger . debug ( \"eval, only: %s , %s , %s \" , self . _only_cyb , self . _only_st , self . _only_kcb ) date_in_cache = await cache . security . get ( \"security:latest_date\" ) if date_in_cache : # \u65e0\u6b64\u6570\u636e\u8bf4\u660eomega\u6709\u67d0\u4e9b\u95ee\u9898\uff0c\u4e0d\u5904\u7406 _date = arrow . get ( date_in_cache ) . date () else : now = datetime . datetime . now () _date = tf . day_shift ( now , 0 ) # \u786e\u5b9a\u6570\u636e\u6e90\uff0ccache\u4e3a\u5f53\u59298\u70b9\u4e4b\u540e\u83b7\u53d6\u7684\u6570\u636e\uff0c\u6570\u636e\u5e93\u5b58\u653e\u524d\u4e00\u65e5\u548c\u66f4\u65e9\u7684\u6570\u636e if not self . target_date or self . target_date >= _date : self . target_date = _date records = None if self . target_date == _date : # \u4ece\u5185\u5b58\u4e2d\u67e5\u627e\uff0c\u5982\u679c\u7f13\u5b58\u4e2d\u7684\u6570\u636e\u5df2\u66f4\u65b0\uff0c\u91cd\u65b0\u52a0\u8f7d\u5230\u5185\u5b58 secs = await cache . security . lrange ( \"security:all\" , 0 , - 1 ) if len ( secs ) != 0 : # using np.datetime64[s] records = np . array ( [ tuple ( x . 
split ( \",\" )) for x in secs ], dtype = security_info_dtype ) else : records = await Security . load_securities_from_db ( self . target_date ) if records is None : return None results = [] for record in records : if self . _type_pattern is not None : if record [ \"type\" ] not in self . _type_pattern : continue if self . _name_pattern is not None : if record [ \"name\" ] . find ( self . _name_pattern ) == - 1 : continue if self . _alias_pattern is not None : if record [ \"alias\" ] . find ( self . _alias_pattern ) == - 1 : continue # \u521b\u4e1a\u677f\uff0c\u79d1\u521b\u677f\uff0cST\u6682\u65f6\u9650\u5b9a\u4e3a\u80a1\u7968\u7c7b\u578b if self . _only_cyb : if record [ \"type\" ] != \"stock\" or not ( record [ \"code\" ][: 3 ] in ( \"300\" , \"301\" ) ): continue if self . _only_kcb : if ( record [ \"type\" ] != \"stock\" or record [ \"code\" ] . startswith ( \"688\" ) is False ): continue if self . _only_st : if record [ \"type\" ] != \"stock\" or record [ \"alias\" ] . find ( \"ST\" ) == - 1 : continue if self . _exclude_cyb : if record [ \"type\" ] == \"stock\" and record [ \"code\" ][: 3 ] in ( \"300\" , \"301\" ): continue if self . _exclude_st : if record [ \"type\" ] == \"stock\" and record [ \"alias\" ] . find ( \"ST\" ) != - 1 : continue if self . _exclude_kcb : if record [ \"type\" ] == \"stock\" and record [ \"code\" ] . startswith ( \"688\" ): continue # \u9000\u5e02\u6682\u4e0d\u9650\u5b9a\u662f\u5426\u4e3a\u80a1\u7968 if self . _include_exit is False : d1 = convert_nptime_to_datetime ( record [ \"end\" ]) . date () if d1 < self . target_date : continue results . append ( record [ \"code\" ]) # \u8fd4\u56de\u6240\u6709\u67e5\u8be2\u5230\u7684\u7ed3\u679c return results","title":"eval()"},{"location":"api/security/#omicron.models.security.Query.exclude_cyb","text":"\u4ece\u8fd4\u56de\u7ed3\u679c\u4e2d\u6392\u9664\u521b\u4e1a\u677f\u7c7b\u578b\u7684\u80a1\u7968 Source code in omicron/models/security.py def exclude_cyb ( self ) -> \"Query\" : \"\"\"\u4ece\u8fd4\u56de\u7ed3\u679c\u4e2d\u6392\u9664\u521b\u4e1a\u677f\u7c7b\u578b\u7684\u80a1\u7968\"\"\" self . _exclude_cyb = True self . _only_cyb = False return self","title":"exclude_cyb()"},{"location":"api/security/#omicron.models.security.Query.exclude_kcb","text":"\u4ece\u8fd4\u56de\u7ed3\u679c\u4e2d\u6392\u9664\u79d1\u521b\u677f\u7c7b\u578b\u7684\u80a1\u7968 Source code in omicron/models/security.py def exclude_kcb ( self ) -> \"Query\" : \"\"\"\u4ece\u8fd4\u56de\u7ed3\u679c\u4e2d\u6392\u9664\u79d1\u521b\u677f\u7c7b\u578b\u7684\u80a1\u7968\"\"\" self . _exclude_kcb = True self . _only_kcb = False return self","title":"exclude_kcb()"},{"location":"api/security/#omicron.models.security.Query.exclude_st","text":"\u4ece\u8fd4\u56de\u7ed3\u679c\u4e2d\u6392\u9664ST\u7c7b\u578b\u7684\u80a1\u7968 Source code in omicron/models/security.py def exclude_st ( self ) -> \"Query\" : \"\"\"\u4ece\u8fd4\u56de\u7ed3\u679c\u4e2d\u6392\u9664ST\u7c7b\u578b\u7684\u80a1\u7968\"\"\" self . _exclude_st = True self . _only_st = False return self","title":"exclude_st()"},{"location":"api/security/#omicron.models.security.Query.include_exit","text":"\u4ece\u8fd4\u56de\u7ed3\u679c\u4e2d\u5305\u542b\u5df2\u9000\u5e02\u7684\u8bc1\u5238 Source code in omicron/models/security.py def include_exit ( self ) -> \"Query\" : \"\"\"\u4ece\u8fd4\u56de\u7ed3\u679c\u4e2d\u5305\u542b\u5df2\u9000\u5e02\u7684\u8bc1\u5238\"\"\" self . 
_include_exit = True return self","title":"include_exit()"},{"location":"api/security/#omicron.models.security.Query.name_like","text":"\u67e5\u627e\u80a1\u7968/\u8bc1\u5238\u540d\u79f0\u4e2d\u51fa\u73b0 name \u7684\u54c1\u79cd \u6ce8\u610f\u8fd9\u91cc\u7684\u8bc1\u5238\u540d\u79f0\u5e76\u4e0d\u662f\u5176\u663e\u793a\u540d\u3002\u6bd4\u5982\u5bf9\u4e2d\u56fd\u5e73\u5b89000001.XSHE\u6765\u8bf4\uff0c\u5b83\u7684\u540d\u79f0\u662fZGPA\uff0c\u800c\u4e0d\u662f\u201c\u4e2d\u56fd\u5e73\u5b89\u201d\u3002 Parameters: Name Type Description Default name str \u5f85\u67e5\u627e\u7684\u540d\u5b57\uff0c\u6bd4\u5982\"ZGPA\" required Source code in omicron/models/security.py def name_like ( self , name : str ) -> \"Query\" : \"\"\"\u67e5\u627e\u80a1\u7968/\u8bc1\u5238\u540d\u79f0\u4e2d\u51fa\u73b0`name`\u7684\u54c1\u79cd \u6ce8\u610f\u8fd9\u91cc\u7684\u8bc1\u5238\u540d\u79f0\u5e76\u4e0d\u662f\u5176\u663e\u793a\u540d\u3002\u6bd4\u5982\u5bf9\u4e2d\u56fd\u5e73\u5b89000001.XSHE\u6765\u8bf4\uff0c\u5b83\u7684\u540d\u79f0\u662fZGPA\uff0c\u800c\u4e0d\u662f\u201c\u4e2d\u56fd\u5e73\u5b89\u201d\u3002 Args: name: \u5f85\u67e5\u627e\u7684\u540d\u5b57\uff0c\u6bd4\u5982\"ZGPA\" \"\"\" if name is None or len ( name ) == 0 : self . _name_pattern = None else : self . _name_pattern = name return self","title":"name_like()"},{"location":"api/security/#omicron.models.security.Query.only_cyb","text":"\u8fd4\u56de\u7ed3\u679c\u4e2d\u53ea\u5305\u542b\u521b\u4e1a\u677f\u80a1\u7968 Source code in omicron/models/security.py def only_cyb ( self ) -> \"Query\" : \"\"\"\u8fd4\u56de\u7ed3\u679c\u4e2d\u53ea\u5305\u542b\u521b\u4e1a\u677f\u80a1\u7968\"\"\" self . _only_cyb = True # \u9ad8\u4f18\u5148\u7ea7 self . _exclude_cyb = False self . _only_kcb = False self . _only_st = False return self","title":"only_cyb()"},{"location":"api/security/#omicron.models.security.Query.only_kcb","text":"\u8fd4\u56de\u7ed3\u679c\u4e2d\u53ea\u5305\u542b\u79d1\u521b\u677f\u80a1\u7968 Source code in omicron/models/security.py def only_kcb ( self ) -> \"Query\" : \"\"\"\u8fd4\u56de\u7ed3\u679c\u4e2d\u53ea\u5305\u542b\u79d1\u521b\u677f\u80a1\u7968\"\"\" self . _only_kcb = True # \u9ad8\u4f18\u5148\u7ea7 self . _exclude_kcb = False self . _only_cyb = False self . _only_st = False return self","title":"only_kcb()"},{"location":"api/security/#omicron.models.security.Query.only_st","text":"\u8fd4\u56de\u7ed3\u679c\u4e2d\u53ea\u5305\u542bST\u7c7b\u578b\u7684\u8bc1\u5238 Source code in omicron/models/security.py def only_st ( self ) -> \"Query\" : \"\"\"\u8fd4\u56de\u7ed3\u679c\u4e2d\u53ea\u5305\u542bST\u7c7b\u578b\u7684\u8bc1\u5238\"\"\" self . _only_st = True # \u9ad8\u4f18\u5148\u7ea7 self . _exclude_st = False self . _only_kcb = False self . 
_only_cyb = False return self","title":"only_st()"},{"location":"api/security/#omicron.models.security.Query.types","text":"\u9009\u62e9\u7c7b\u578b\u5728 types \u4e2d\u7684\u8bc1\u5238\u54c1\u79cd Parameters: Name Type Description Default types List[str] \u6709\u6548\u7684\u7c7b\u578b\u5305\u62ec: \u5bf9\u80a1\u7968\u6307\u6570\u800c\u8a00\u662f\uff08'index', 'stock'\uff09\uff0c\u5bf9\u57fa\u91d1\u800c\u8a00\u5219\u662f\uff08'etf', 'fjb', 'mmf', 'reits', 'fja', 'fjm', 'lof'\uff09 required Source code in omicron/models/security.py def types ( self , types : List [ str ]) -> \"Query\" : \"\"\"\u9009\u62e9\u7c7b\u578b\u5728`types`\u4e2d\u7684\u8bc1\u5238\u54c1\u79cd Args: types: \u6709\u6548\u7684\u7c7b\u578b\u5305\u62ec: \u5bf9\u80a1\u7968\u6307\u6570\u800c\u8a00\u662f\uff08'index', 'stock'\uff09\uff0c\u5bf9\u57fa\u91d1\u800c\u8a00\u5219\u662f\uff08'etf', 'fjb', 'mmf', 'reits', 'fja', 'fjm', 'lof'\uff09 \"\"\" if types is None or isinstance ( types , List ) is False : return self if len ( types ) == 0 : self . _type_pattern = [ \"index\" , \"stock\" ] else : tmp = set ( types ) self . _type_pattern = list ( tmp ) return self","title":"types()"},{"location":"api/security/#omicron.models.security.Security","text":"Source code in omicron/models/security.py class Security : _securities = [] _securities_date = None _security_types = set () _stocks = [] @classmethod async def init ( cls ): \"\"\"\u521d\u59cb\u5316Security. \u4e00\u822c\u800c\u8a00\uff0comicron\u7684\u4f7f\u7528\u8005\u65e0\u987b\u8c03\u7528\u6b64\u65b9\u6cd5\uff0c\u5b83\u4f1a\u5728omicron\u521d\u59cb\u5316\uff08\u901a\u8fc7`omicron.init`\uff09\u65f6\uff0c\u88ab\u81ea\u52a8\u8c03\u7528\u3002 Raises: DataNotReadyError: \u5982\u679comicron\u672a\u521d\u59cb\u5316\uff0c\u6216\u8005cache\u4e2d\u672a\u52a0\u8f7d\u6700\u65b0\u8bc1\u5238\u5217\u8868\uff0c\u5219\u629b\u51fa\u6b64\u5f02\u5e38\u3002 \"\"\" # read all securities from redis, 7111 records now # {'index', 'stock'} # {'fjb', 'mmf', 'reits', 'fja', 'fjm'} # {'etf', 'lof'} if len ( cls . _securities ) > 100 : return True secs = await cls . load_securities () if secs is None or len ( secs ) == 0 : # pragma: no cover raise DataNotReadyError ( \"No securities in cache, make sure you have called omicron.init() first.\" ) print ( \"init securities done\" ) return True @classmethod async def load_securities ( cls ): \"\"\"\u52a0\u8f7d\u6240\u6709\u8bc1\u5238\u7684\u4fe1\u606f\uff0c\u5e76\u7f13\u5b58\u5230\u5185\u5b58\u4e2d \u4e00\u822c\u800c\u8a00\uff0comicron\u7684\u4f7f\u7528\u8005\u65e0\u987b\u8c03\u7528\u6b64\u65b9\u6cd5\uff0c\u5b83\u4f1a\u5728omicron\u521d\u59cb\u5316\uff08\u901a\u8fc7`omicron.init`\uff09\u65f6\uff0c\u88ab\u81ea\u52a8\u8c03\u7528\u3002 \"\"\" secs = await cache . security . lrange ( \"security:all\" , 0 , - 1 ) if len ( secs ) != 0 : # using np.datetime64[s] _securities = np . array ( [ tuple ( x . split ( \",\" )) for x in secs ], dtype = security_info_dtype ) # \u66f4\u65b0\u8bc1\u5238\u7c7b\u578b\u5217\u8868 cls . _securities = _securities cls . _security_types = set ( _securities [ \"type\" ]) cls . _stocks = _securities [ ( _securities [ \"type\" ] == \"stock\" ) | ( _securities [ \"type\" ] == \"index\" ) ] logger . info ( \" %d securities loaded, types: %s \" , len ( _securities ), cls . _security_types ) date_in_cache = await cache . security . get ( \"security:latest_date\" ) if date_in_cache is not None : cls . _securities_date = arrow . get ( date_in_cache ) . date () else : cls . _securities_date = datetime . date . 
today () return _securities else : # pragma: no cover return None @classmethod async def get_security_types ( cls ): if cls . _security_types : return list ( cls . _security_types ) else : return None @classmethod def get_stock ( cls , code ) -> NDArray [ security_info_dtype ]: \"\"\"\u6839\u636e`code`\u6765\u67e5\u627e\u5bf9\u5e94\u7684\u80a1\u7968\uff08\u542b\u6307\u6570\uff09\u5bf9\u8c61\u4fe1\u606f\u3002 \u5982\u679c\u60a8\u53ea\u6709\u80a1\u7968\u4ee3\u7801\uff0c\u60f3\u77e5\u9053\u8be5\u4ee3\u7801\u5bf9\u5e94\u7684\u80a1\u7968\u540d\u79f0\u3001\u522b\u540d\uff08\u663e\u793a\u540d\uff09\u3001\u4e0a\u5e02\u65e5\u671f\u7b49\u4fe1\u606f\uff0c\u5c31\u53ef\u4ee5\u4f7f\u7528\u6b64\u65b9\u6cd5\u6765\u83b7\u53d6\u76f8\u5173\u4fe1\u606f\u3002 \u8fd4\u56de\u7c7b\u578b\u4e3a`security_info_dtype`\u7684numpy\u6570\u7ec4\uff0c\u4f46\u4ec5\u5305\u542b\u4e00\u4e2a\u5143\u7d20\u3002\u60a8\u53ef\u4ee5\u8c61\u5b57\u5178\u4e00\u6837\u5b58\u53d6\u5b83\uff0c\u6bd4\u5982 ```python item = Security.get_stock(\"000001.XSHE\") print(item[\"alias\"]) ``` \u663e\u793a\u4e3a\"\u5e73\u5b89\u94f6\u884c\" Args: code: \u5f85\u67e5\u8be2\u7684\u80a1\u7968/\u6307\u6570\u4ee3\u7801 Returns: \u7c7b\u578b\u4e3a`security_info_dtype`\u7684numpy\u6570\u7ec4\uff0c\u4f46\u4ec5\u5305\u542b\u4e00\u4e2a\u5143\u7d20 \"\"\" if len ( cls . _securities ) == 0 : return None tmp = cls . _securities [ cls . _securities [ \"code\" ] == code ] if len ( tmp ) > 0 : if tmp [ \"type\" ] in [ \"stock\" , \"index\" ]: return tmp [ 0 ] return None @classmethod def fuzzy_match_ex ( cls , query : str ) -> Dict [ str , Tuple ]: # fixme: \u6b64\u65b9\u6cd5\u4e0eStock.fuzzy_match\u91cd\u590d\uff0c\u5e76\u4e14\u8fdb\u884c\u4e86\u7c7b\u578b\u9650\u5236\uff0c\u4f7f\u5f97\u5176\u4e0d\u9002\u5408\u653e\u5728Security\u91cc\uff0c\u4ee5\u53ca\u4f5c\u4e3a\u4e00\u4e2a\u901a\u7528\u65b9\u6cd5 query = query . upper () if re . match ( r \"\\d+\" , query ): return { sec [ \"code\" ]: sec . tolist () for sec in cls . _securities if sec [ \"code\" ] . find ( query ) != - 1 and sec [ \"type\" ] == \"stock\" } elif re . match ( r \"[A-Z]+\" , query ): return { sec [ \"code\" ]: sec . tolist () for sec in cls . _securities if sec [ \"name\" ] . startswith ( query ) and sec [ \"type\" ] == \"stock\" } else : return { sec [ \"code\" ]: sec . tolist () for sec in cls . _securities if sec [ \"alias\" ] . find ( query ) != - 1 and sec [ \"type\" ] == \"stock\" } @classmethod async def info ( cls , code , date = None ): _obj = await cls . query_security_via_date ( code , date ) if _obj is None : return None # \"_time\", \"code\", \"type\", \"alias\", \"end\", \"ipo\", \"name\" d1 = convert_nptime_to_datetime ( _obj [ \"ipo\" ]) . date () d2 = convert_nptime_to_datetime ( _obj [ \"end\" ]) . date () return { \"type\" : _obj [ \"type\" ], \"display_name\" : _obj [ \"alias\" ], \"alias\" : _obj [ \"alias\" ], \"end\" : d2 , \"start\" : d1 , \"name\" : _obj [ \"name\" ], } @classmethod async def name ( cls , code , date = None ): _security = await cls . query_security_via_date ( code , date ) if _security is None : return None return _security [ \"name\" ] @classmethod async def alias ( cls , code , date = None ): return await cls . display_name ( code , date ) @classmethod async def display_name ( cls , code , date = None ): _security = await cls . query_security_via_date ( code , date ) if _security is None : return None return _security [ \"alias\" ] @classmethod async def start_date ( cls , code , date = None ): _security = await cls . 
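fuzzy_match_ex() picks its matching rule from the shape of the query: a query beginning with digits is matched as a code substring, one beginning with latin letters (upper-cased first) as a prefix of the abbreviated name, and anything else as a substring of the display name; only stock-type records are returned. A small sketch:

```python
from omicron.models.security import Security


def quick_search():
    by_code = Security.fuzzy_match_ex("600")    # substring match on the code
    by_name = Security.fuzzy_match_ex("zgpa")   # prefix match on the abbreviated name
    by_alias = Security.fuzzy_match_ex("平安")   # substring match on the display name
    return by_code, by_name, by_alias
```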
query_security_via_date ( code , date ) if _security is None : return None return convert_nptime_to_datetime ( _security [ \"ipo\" ]) . date () @classmethod async def end_date ( cls , code , date = None ): _security = await cls . query_security_via_date ( code , date ) if _security is None : return None return convert_nptime_to_datetime ( _security [ \"end\" ]) . date () @classmethod async def security_type ( cls , code , date = None ) -> SecurityType : _security = await cls . query_security_via_date ( code , date ) if _security is None : return None return _security [ \"type\" ] @classmethod async def query_security_via_date ( cls , code : str , date : datetime . date = None ): if date is None : # \u4ece\u5185\u5b58\u4e2d\u67e5\u627e\uff0c\u5982\u679c\u7f13\u5b58\u4e2d\u7684\u6570\u636e\u5df2\u66f4\u65b0\uff0c\u91cd\u65b0\u52a0\u8f7d\u5230\u5185\u5b58 date_in_cache = await cache . security . get ( \"security:latest_date\" ) if date_in_cache is not None : date = arrow . get ( date_in_cache ) . date () if date > cls . _securities_date : await cls . load_securities () results = cls . _securities [ cls . _securities [ \"code\" ] == code ] else : # \u4eceinfluxdb\u67e5\u627e date = tf . day_shift ( date , 0 ) results = await cls . load_securities_from_db ( date , code ) if results is not None and len ( results ) > 0 : return results [ 0 ] else : return None @classmethod def select ( cls , date : datetime . date = None ) -> Query : if date is None : return Query ( target_date = None ) else : return Query ( target_date = date ) @classmethod async def update_secs_cache ( cls , dt : datetime . date , securities : List [ Tuple ]): \"\"\"\u66f4\u65b0\u8bc1\u5238\u5217\u8868\u5230\u7f13\u5b58\u6570\u636e\u5e93\u4e2d Args: dt: \u8bc1\u5238\u5217\u8868\u5f52\u5c5e\u7684\u65e5\u671f securities: \u8bc1\u5238\u5217\u8868, \u5143\u7d20\u4e3a\u5143\u7ec4\uff0c\u5206\u522b\u4e3a\u4ee3\u7801\u3001\u522b\u540d\u3001\u540d\u79f0\u3001IPO\u65e5\u671f\u3001\u9000\u5e02\u65e5\u548c\u8bc1\u5238\u7c7b\u578b \"\"\" # stock: {'index', 'stock'} # funds: {'fjb', 'mmf', 'reits', 'fja', 'fjm'} # {'etf', 'lof'} key = \"security:all\" pipeline = cache . security . pipeline () pipeline . delete ( key ) for code , alias , name , start , end , _type in securities : pipeline . rpush ( key , f \" { code } , { alias } , { name } , { start } ,\" f \" { end } , { _type } \" ) await pipeline . execute () logger . info ( \"all securities saved to cache %s , %d secs\" , key , len ( securities )) # update latest date info await cache . security . set ( \"security:latest_date\" , dt . strftime ( \"%Y-%m- %d \" )) @classmethod async def save_securities ( cls , securities : List [ str ], dt : datetime . date ): \"\"\"\u4fdd\u5b58\u6307\u5b9a\u7684\u8bc1\u5238\u4fe1\u606f\u5230\u7f13\u5b58\u4e2d\uff0c\u5e76\u4e14\u5b58\u5165influxdb\uff0c\u5b9a\u65f6job\u8c03\u7528\u672c\u63a5\u53e3 Args: securities: \u8bc1\u5238\u4ee3\u7801\u5217\u8868\u3002 \"\"\" # stock: {'index', 'stock'} # funds: {'fjb', 'mmf', 'reits', 'fja', 'fjm'} # {'etf', 'lof'} if dt is None or len ( securities ) == 0 : return measurement = \"security_list\" client = get_influx_client () # code, alias, name, start, end, type security_list = np . array ( [ ( dt , x [ 0 ], f \" { x [ 0 ] } , { x [ 1 ] } , { x [ 2 ] } , { x [ 3 ] } , { x [ 4 ] } , { x [ 5 ] } \" ) for x in securities ], dtype = security_db_dtype , ) await client . 
save ( security_list , measurement , time_key = \"frame\" , tag_keys = [ \"code\" ] ) @classmethod async def load_securities_from_db ( cls , target_date : datetime . date , code : str = None ): if target_date is None : return None client = get_influx_client () measurement = \"security_list\" flux = ( Flux () . measurement ( measurement ) . range ( target_date , target_date ) . bucket ( client . _bucket ) . fields ([ \"info\" ]) ) if code is not None and len ( code ) > 0 : flux . tags ({ \"code\" : code }) data = await client . query ( flux ) if len ( data ) == 2 : # \\r\\n return None ds = DataframeDeserializer ( sort_values = \"_time\" , usecols = [ \"_time\" , \"code\" , \"info\" ], time_col = \"_time\" , engine = \"c\" , ) actual = ds ( data ) secs = actual . to_records ( index = False ) if len ( secs ) != 0 : # \"_time\", \"code\", \"code, alias, name, start, end, type\" _securities = np . array ( [ tuple ( x [ \"info\" ] . split ( \",\" )) for x in secs ], dtype = security_info_dtype ) return _securities else : return None @classmethod async def get_datescope_from_db ( cls ): # fixme: \u51fd\u6570\u540d\u65e0\u6cd5\u53cd\u6620\u7528\u9014\uff0c\u9700\u8981\u589e\u52a0\u6587\u6863\u6ce8\u91ca\uff0c\u8bf4\u660e\u8be5\u51fd\u6570\u7684\u4f5c\u7528,\u6216\u8005\u4e0d\u5e94\u8be5\u51fa\u73b0\u5728\u6b64\u7c7b\u4e2d\uff1f client = get_influx_client () measurement = \"security_list\" date1 = arrow . get ( \"2005-01-01\" ) . date () date2 = arrow . now () . naive . date () flux = ( Flux () . measurement ( measurement ) . range ( date1 , date2 ) . bucket ( client . _bucket ) . tags ({ \"code\" : \"000001.XSHE\" }) ) data = await client . query ( flux ) if len ( data ) == 2 : # \\r\\n return None , None ds = DataframeDeserializer ( sort_values = \"_time\" , usecols = [ \"_time\" ], time_col = \"_time\" , engine = \"c\" ) actual = ds ( data ) secs = actual . to_records ( index = False ) if len ( secs ) != 0 : d1 = convert_nptime_to_datetime ( secs [ 0 ][ \"_time\" ]) d2 = convert_nptime_to_datetime ( secs [ len ( secs ) - 1 ][ \"_time\" ]) return d1 . date (), d2 . date () else : return None , None @classmethod async def _notify_special_bonusnote ( cls , code , note , cancel_date ): # fixme: \u8fd9\u4e2a\u51fd\u6570\u5e94\u8be5\u51fa\u73b0\u5728omega\u4e2d\uff1f default_cancel_date = datetime . date ( 2099 , 1 , 1 ) # \u9ed8\u8ba4\u65e0\u53d6\u6d88\u516c\u544a # report this special event to notify user if cancel_date != default_cancel_date : ding ( \"security %s , bonus_cancel_pub_date %s \" % ( code , cancel_date )) if note . find ( \"\u6d41\u901a\" ) != - 1 : # \u68c0\u67e5\u662f\u5426\u6709\u201c\u6d41\u901a\u80a1\u201d\u6587\u5b57 ding ( \"security %s , special xrxd note: %s \" % ( code , note )) @classmethod async def save_xrxd_reports ( cls , reports : List [ str ], dt : datetime . date ): # fixme: \u6b64\u51fd\u6570\u5e94\u8be5\u5c5e\u4e8eomega? \"\"\"\u4fdd\u5b581\u5e74\u5185\u7684\u5206\u7ea2\u9001\u80a1\u4fe1\u606f\uff0c\u5e76\u4e14\u5b58\u5165influxdb\uff0c\u5b9a\u65f6job\u8c03\u7528\u672c\u63a5\u53e3 Args: reports: \u5206\u7ea2\u9001\u80a1\u516c\u544a \"\"\" # code(0), a_xr_date, board_plan_bonusnote, bonus_ratio_rmb(3), dividend_ratio, transfer_ratio(5), # at_bonus_ratio_rmb(6), report_date, plan_progress, implementation_bonusnote, bonus_cancel_pub_date(10) if len ( reports ) == 0 or dt is None : return # read reports from db and convert to dict map reports_in_db = {} dt_start = dt - datetime . timedelta ( days = 366 ) # \u5f80\u524d\u56de\u6eaf366\u5929 dt_end = dt + datetime . 
timedelta ( days = 366 ) # \u5f80\u540e\u5ef6\u957f366\u5929 existing_records = await cls . _load_xrxd_from_db ( None , dt_start , dt_end ) for record in existing_records : code = record [ 0 ] if code not in reports_in_db : reports_in_db [ code ] = [ record ] else : reports_in_db [ code ] . append ( record ) records = [] # \u51c6\u5907\u5199\u5165db for x in reports : code = x [ 0 ] note = x [ 2 ] cancel_date = x [ 10 ] existing_items = reports_in_db . get ( code , None ) if existing_items is None : # \u65b0\u8bb0\u5f55 record = ( x [ 1 ], x [ 0 ], f \" { x [ 0 ] } | { x [ 1 ] } | { x [ 2 ] } | { x [ 3 ] } | { x [ 4 ] } | { x [ 5 ] } | { x [ 6 ] } | { x [ 7 ] } | { x [ 8 ] } | { x [ 9 ] } | { x [ 10 ] } \" , ) records . append ( record ) await cls . _notify_special_bonusnote ( code , note , cancel_date ) else : new_record = True for item in existing_items : existing_date = convert_nptime_to_datetime ( item [ 1 ]) . date () if existing_date == x [ 1 ]: # \u5982\u679cxr_date\u76f8\u540c\uff0c\u4e0d\u66f4\u65b0 new_record = False continue if new_record : record = ( x [ 1 ], x [ 0 ], f \" { x [ 0 ] } | { x [ 1 ] } | { x [ 2 ] } | { x [ 3 ] } | { x [ 4 ] } | { x [ 5 ] } | { x [ 6 ] } | { x [ 7 ] } | { x [ 8 ] } | { x [ 9 ] } | { x [ 10 ] } \" , ) records . append ( record ) await cls . _notify_special_bonusnote ( code , note , cancel_date ) logger . info ( \"save_xrxd_reports, %d records to be saved\" , len ( records )) if len ( records ) == 0 : return measurement = \"security_xrxd_reports\" client = get_influx_client () # a_xr_date(_time), code(tag), info report_list = np . array ( records , dtype = security_db_dtype ) await client . save ( report_list , measurement , time_key = \"frame\" , tag_keys = [ \"code\" ]) @classmethod async def _load_xrxd_from_db ( cls , code , dt_start : datetime . date , dt_end : datetime . date ): if dt_start is None or dt_end is None : return [] client = get_influx_client () measurement = \"security_xrxd_reports\" flux = ( Flux () . measurement ( measurement ) . range ( dt_start , dt_end ) . bucket ( client . _bucket ) . fields ([ \"info\" ]) ) if code is not None and len ( code ) > 0 : flux . tags ({ \"code\" : code }) data = await client . query ( flux ) if len ( data ) == 2 : # \\r\\n return [] ds = DataframeDeserializer ( sort_values = \"_time\" , usecols = [ \"_time\" , \"code\" , \"info\" ], time_col = \"_time\" , engine = \"c\" , ) actual = ds ( data ) secs = actual . to_records ( index = False ) if len ( secs ) != 0 : _reports = np . array ( [ tuple ( x [ \"info\" ] . split ( \"|\" )) for x in secs ], dtype = xrxd_info_dtype ) return _reports else : return [] @classmethod async def get_xrxd_info ( cls , dt : datetime . date , code : str = None ): if dt is None : return None # code(0), a_xr_date, board_plan_bonusnote, bonus_ratio_rmb(3), dividend_ratio, transfer_ratio(5), # at_bonus_ratio_rmb(6), report_date, plan_progress, implementation_bonusnote, bonus_cancel_pub_date(10) reports = await cls . _load_xrxd_from_db ( code , dt , dt ) if len ( reports ) == 0 : return None readable_reports = [] for report in reports : xr_date = convert_nptime_to_datetime ( report [ 1 ]) . date () readable_reports . 
append ( { \"code\" : report [ 0 ], \"xr_date\" : xr_date , \"bonus\" : report [ 3 ], \"dividend\" : report [ 4 ], \"transfer\" : report [ 5 ], \"bonusnote\" : report [ 2 ], } ) return readable_reports","title":"Security"},{"location":"api/security/#omicron.models.security.Security.get_stock","text":"\u6839\u636e code \u6765\u67e5\u627e\u5bf9\u5e94\u7684\u80a1\u7968\uff08\u542b\u6307\u6570\uff09\u5bf9\u8c61\u4fe1\u606f\u3002 \u5982\u679c\u60a8\u53ea\u6709\u80a1\u7968\u4ee3\u7801\uff0c\u60f3\u77e5\u9053\u8be5\u4ee3\u7801\u5bf9\u5e94\u7684\u80a1\u7968\u540d\u79f0\u3001\u522b\u540d\uff08\u663e\u793a\u540d\uff09\u3001\u4e0a\u5e02\u65e5\u671f\u7b49\u4fe1\u606f\uff0c\u5c31\u53ef\u4ee5\u4f7f\u7528\u6b64\u65b9\u6cd5\u6765\u83b7\u53d6\u76f8\u5173\u4fe1\u606f\u3002 \u8fd4\u56de\u7c7b\u578b\u4e3a security_info_dtype \u7684numpy\u6570\u7ec4\uff0c\u4f46\u4ec5\u5305\u542b\u4e00\u4e2a\u5143\u7d20\u3002\u60a8\u53ef\u4ee5\u8c61\u5b57\u5178\u4e00\u6837\u5b58\u53d6\u5b83\uff0c\u6bd4\u5982 1 2 item = Security . get_stock ( \"000001.XSHE\" ) print ( item [ \"alias\" ]) \u663e\u793a\u4e3a\"\u5e73\u5b89\u94f6\u884c\" Parameters: Name Type Description Default code \u5f85\u67e5\u8be2\u7684\u80a1\u7968/\u6307\u6570\u4ee3\u7801 required Returns: Type Description numpy.ndarray[Any, numpy.dtype[[('code', 'O'), ('alias', 'O'), ('name', 'O'), ('ipo', 'datetime64[s]'), ('end', 'datetime64[s]'), ('type', 'O')]]] \u7c7b\u578b\u4e3a security_info_dtype \u7684numpy\u6570\u7ec4\uff0c\u4f46\u4ec5\u5305\u542b\u4e00\u4e2a\u5143\u7d20 Source code in omicron/models/security.py @classmethod def get_stock ( cls , code ) -> NDArray [ security_info_dtype ]: \"\"\"\u6839\u636e`code`\u6765\u67e5\u627e\u5bf9\u5e94\u7684\u80a1\u7968\uff08\u542b\u6307\u6570\uff09\u5bf9\u8c61\u4fe1\u606f\u3002 \u5982\u679c\u60a8\u53ea\u6709\u80a1\u7968\u4ee3\u7801\uff0c\u60f3\u77e5\u9053\u8be5\u4ee3\u7801\u5bf9\u5e94\u7684\u80a1\u7968\u540d\u79f0\u3001\u522b\u540d\uff08\u663e\u793a\u540d\uff09\u3001\u4e0a\u5e02\u65e5\u671f\u7b49\u4fe1\u606f\uff0c\u5c31\u53ef\u4ee5\u4f7f\u7528\u6b64\u65b9\u6cd5\u6765\u83b7\u53d6\u76f8\u5173\u4fe1\u606f\u3002 \u8fd4\u56de\u7c7b\u578b\u4e3a`security_info_dtype`\u7684numpy\u6570\u7ec4\uff0c\u4f46\u4ec5\u5305\u542b\u4e00\u4e2a\u5143\u7d20\u3002\u60a8\u53ef\u4ee5\u8c61\u5b57\u5178\u4e00\u6837\u5b58\u53d6\u5b83\uff0c\u6bd4\u5982 ```python item = Security.get_stock(\"000001.XSHE\") print(item[\"alias\"]) ``` \u663e\u793a\u4e3a\"\u5e73\u5b89\u94f6\u884c\" Args: code: \u5f85\u67e5\u8be2\u7684\u80a1\u7968/\u6307\u6570\u4ee3\u7801 Returns: \u7c7b\u578b\u4e3a`security_info_dtype`\u7684numpy\u6570\u7ec4\uff0c\u4f46\u4ec5\u5305\u542b\u4e00\u4e2a\u5143\u7d20 \"\"\" if len ( cls . _securities ) == 0 : return None tmp = cls . _securities [ cls . _securities [ \"code\" ] == code ] if len ( tmp ) > 0 : if tmp [ \"type\" ] in [ \"stock\" , \"index\" ]: return tmp [ 0 ] return None","title":"get_stock()"},{"location":"api/security/#omicron.models.security.Security.init","text":"\u521d\u59cb\u5316Security. 
\u4e00\u822c\u800c\u8a00\uff0comicron\u7684\u4f7f\u7528\u8005\u65e0\u987b\u8c03\u7528\u6b64\u65b9\u6cd5\uff0c\u5b83\u4f1a\u5728omicron\u521d\u59cb\u5316\uff08\u901a\u8fc7 omicron.init \uff09\u65f6\uff0c\u88ab\u81ea\u52a8\u8c03\u7528\u3002 Exceptions: Type Description DataNotReadyError \u5982\u679comicron\u672a\u521d\u59cb\u5316\uff0c\u6216\u8005cache\u4e2d\u672a\u52a0\u8f7d\u6700\u65b0\u8bc1\u5238\u5217\u8868\uff0c\u5219\u629b\u51fa\u6b64\u5f02\u5e38\u3002 Source code in omicron/models/security.py @classmethod async def init ( cls ): \"\"\"\u521d\u59cb\u5316Security. \u4e00\u822c\u800c\u8a00\uff0comicron\u7684\u4f7f\u7528\u8005\u65e0\u987b\u8c03\u7528\u6b64\u65b9\u6cd5\uff0c\u5b83\u4f1a\u5728omicron\u521d\u59cb\u5316\uff08\u901a\u8fc7`omicron.init`\uff09\u65f6\uff0c\u88ab\u81ea\u52a8\u8c03\u7528\u3002 Raises: DataNotReadyError: \u5982\u679comicron\u672a\u521d\u59cb\u5316\uff0c\u6216\u8005cache\u4e2d\u672a\u52a0\u8f7d\u6700\u65b0\u8bc1\u5238\u5217\u8868\uff0c\u5219\u629b\u51fa\u6b64\u5f02\u5e38\u3002 \"\"\" # read all securities from redis, 7111 records now # {'index', 'stock'} # {'fjb', 'mmf', 'reits', 'fja', 'fjm'} # {'etf', 'lof'} if len ( cls . _securities ) > 100 : return True secs = await cls . load_securities () if secs is None or len ( secs ) == 0 : # pragma: no cover raise DataNotReadyError ( \"No securities in cache, make sure you have called omicron.init() first.\" ) print ( \"init securities done\" ) return True","title":"init()"},{"location":"api/security/#omicron.models.security.Security.load_securities","text":"\u52a0\u8f7d\u6240\u6709\u8bc1\u5238\u7684\u4fe1\u606f\uff0c\u5e76\u7f13\u5b58\u5230\u5185\u5b58\u4e2d \u4e00\u822c\u800c\u8a00\uff0comicron\u7684\u4f7f\u7528\u8005\u65e0\u987b\u8c03\u7528\u6b64\u65b9\u6cd5\uff0c\u5b83\u4f1a\u5728omicron\u521d\u59cb\u5316\uff08\u901a\u8fc7 omicron.init \uff09\u65f6\uff0c\u88ab\u81ea\u52a8\u8c03\u7528\u3002 Source code in omicron/models/security.py @classmethod async def load_securities ( cls ): \"\"\"\u52a0\u8f7d\u6240\u6709\u8bc1\u5238\u7684\u4fe1\u606f\uff0c\u5e76\u7f13\u5b58\u5230\u5185\u5b58\u4e2d \u4e00\u822c\u800c\u8a00\uff0comicron\u7684\u4f7f\u7528\u8005\u65e0\u987b\u8c03\u7528\u6b64\u65b9\u6cd5\uff0c\u5b83\u4f1a\u5728omicron\u521d\u59cb\u5316\uff08\u901a\u8fc7`omicron.init`\uff09\u65f6\uff0c\u88ab\u81ea\u52a8\u8c03\u7528\u3002 \"\"\" secs = await cache . security . lrange ( \"security:all\" , 0 , - 1 ) if len ( secs ) != 0 : # using np.datetime64[s] _securities = np . array ( [ tuple ( x . split ( \",\" )) for x in secs ], dtype = security_info_dtype ) # \u66f4\u65b0\u8bc1\u5238\u7c7b\u578b\u5217\u8868 cls . _securities = _securities cls . _security_types = set ( _securities [ \"type\" ]) cls . _stocks = _securities [ ( _securities [ \"type\" ] == \"stock\" ) | ( _securities [ \"type\" ] == \"index\" ) ] logger . info ( \" %d securities loaded, types: %s \" , len ( _securities ), cls . _security_types ) date_in_cache = await cache . security . get ( \"security:latest_date\" ) if date_in_cache is not None : cls . _securities_date = arrow . get ( date_in_cache ) . date () else : cls . _securities_date = datetime . date . 
today () return _securities else : # pragma: no cover return None","title":"load_securities()"},{"location":"api/security/#omicron.models.security.Security.save_securities","text":"\u4fdd\u5b58\u6307\u5b9a\u7684\u8bc1\u5238\u4fe1\u606f\u5230\u7f13\u5b58\u4e2d\uff0c\u5e76\u4e14\u5b58\u5165influxdb\uff0c\u5b9a\u65f6job\u8c03\u7528\u672c\u63a5\u53e3 Parameters: Name Type Description Default securities List[str] \u8bc1\u5238\u4ee3\u7801\u5217\u8868\u3002 required Source code in omicron/models/security.py @classmethod async def save_securities ( cls , securities : List [ str ], dt : datetime . date ): \"\"\"\u4fdd\u5b58\u6307\u5b9a\u7684\u8bc1\u5238\u4fe1\u606f\u5230\u7f13\u5b58\u4e2d\uff0c\u5e76\u4e14\u5b58\u5165influxdb\uff0c\u5b9a\u65f6job\u8c03\u7528\u672c\u63a5\u53e3 Args: securities: \u8bc1\u5238\u4ee3\u7801\u5217\u8868\u3002 \"\"\" # stock: {'index', 'stock'} # funds: {'fjb', 'mmf', 'reits', 'fja', 'fjm'} # {'etf', 'lof'} if dt is None or len ( securities ) == 0 : return measurement = \"security_list\" client = get_influx_client () # code, alias, name, start, end, type security_list = np . array ( [ ( dt , x [ 0 ], f \" { x [ 0 ] } , { x [ 1 ] } , { x [ 2 ] } , { x [ 3 ] } , { x [ 4 ] } , { x [ 5 ] } \" ) for x in securities ], dtype = security_db_dtype , ) await client . save ( security_list , measurement , time_key = \"frame\" , tag_keys = [ \"code\" ] )","title":"save_securities()"},{"location":"api/security/#omicron.models.security.Security.save_xrxd_reports","text":"\u4fdd\u5b581\u5e74\u5185\u7684\u5206\u7ea2\u9001\u80a1\u4fe1\u606f\uff0c\u5e76\u4e14\u5b58\u5165influxdb\uff0c\u5b9a\u65f6job\u8c03\u7528\u672c\u63a5\u53e3 Parameters: Name Type Description Default reports List[str] \u5206\u7ea2\u9001\u80a1\u516c\u544a required Source code in omicron/models/security.py @classmethod async def save_xrxd_reports ( cls , reports : List [ str ], dt : datetime . date ): # fixme: \u6b64\u51fd\u6570\u5e94\u8be5\u5c5e\u4e8eomega? \"\"\"\u4fdd\u5b581\u5e74\u5185\u7684\u5206\u7ea2\u9001\u80a1\u4fe1\u606f\uff0c\u5e76\u4e14\u5b58\u5165influxdb\uff0c\u5b9a\u65f6job\u8c03\u7528\u672c\u63a5\u53e3 Args: reports: \u5206\u7ea2\u9001\u80a1\u516c\u544a \"\"\" # code(0), a_xr_date, board_plan_bonusnote, bonus_ratio_rmb(3), dividend_ratio, transfer_ratio(5), # at_bonus_ratio_rmb(6), report_date, plan_progress, implementation_bonusnote, bonus_cancel_pub_date(10) if len ( reports ) == 0 or dt is None : return # read reports from db and convert to dict map reports_in_db = {} dt_start = dt - datetime . timedelta ( days = 366 ) # \u5f80\u524d\u56de\u6eaf366\u5929 dt_end = dt + datetime . timedelta ( days = 366 ) # \u5f80\u540e\u5ef6\u957f366\u5929 existing_records = await cls . _load_xrxd_from_db ( None , dt_start , dt_end ) for record in existing_records : code = record [ 0 ] if code not in reports_in_db : reports_in_db [ code ] = [ record ] else : reports_in_db [ code ] . append ( record ) records = [] # \u51c6\u5907\u5199\u5165db for x in reports : code = x [ 0 ] note = x [ 2 ] cancel_date = x [ 10 ] existing_items = reports_in_db . get ( code , None ) if existing_items is None : # \u65b0\u8bb0\u5f55 record = ( x [ 1 ], x [ 0 ], f \" { x [ 0 ] } | { x [ 1 ] } | { x [ 2 ] } | { x [ 3 ] } | { x [ 4 ] } | { x [ 5 ] } | { x [ 6 ] } | { x [ 7 ] } | { x [ 8 ] } | { x [ 9 ] } | { x [ 10 ] } \" , ) records . append ( record ) await cls . _notify_special_bonusnote ( code , note , cancel_date ) else : new_record = True for item in existing_items : existing_date = convert_nptime_to_datetime ( item [ 1 ]) . 
date () if existing_date == x [ 1 ]: # \u5982\u679cxr_date\u76f8\u540c\uff0c\u4e0d\u66f4\u65b0 new_record = False continue if new_record : record = ( x [ 1 ], x [ 0 ], f \" { x [ 0 ] } | { x [ 1 ] } | { x [ 2 ] } | { x [ 3 ] } | { x [ 4 ] } | { x [ 5 ] } | { x [ 6 ] } | { x [ 7 ] } | { x [ 8 ] } | { x [ 9 ] } | { x [ 10 ] } \" , ) records . append ( record ) await cls . _notify_special_bonusnote ( code , note , cancel_date ) logger . info ( \"save_xrxd_reports, %d records to be saved\" , len ( records )) if len ( records ) == 0 : return measurement = \"security_xrxd_reports\" client = get_influx_client () # a_xr_date(_time), code(tag), info report_list = np . array ( records , dtype = security_db_dtype ) await client . save ( report_list , measurement , time_key = \"frame\" , tag_keys = [ \"code\" ])","title":"save_xrxd_reports()"},{"location":"api/security/#omicron.models.security.Security.update_secs_cache","text":"\u66f4\u65b0\u8bc1\u5238\u5217\u8868\u5230\u7f13\u5b58\u6570\u636e\u5e93\u4e2d Parameters: Name Type Description Default dt date \u8bc1\u5238\u5217\u8868\u5f52\u5c5e\u7684\u65e5\u671f required securities List[Tuple] \u8bc1\u5238\u5217\u8868, \u5143\u7d20\u4e3a\u5143\u7ec4\uff0c\u5206\u522b\u4e3a\u4ee3\u7801\u3001\u522b\u540d\u3001\u540d\u79f0\u3001IPO\u65e5\u671f\u3001\u9000\u5e02\u65e5\u548c\u8bc1\u5238\u7c7b\u578b required Source code in omicron/models/security.py @classmethod async def update_secs_cache ( cls , dt : datetime . date , securities : List [ Tuple ]): \"\"\"\u66f4\u65b0\u8bc1\u5238\u5217\u8868\u5230\u7f13\u5b58\u6570\u636e\u5e93\u4e2d Args: dt: \u8bc1\u5238\u5217\u8868\u5f52\u5c5e\u7684\u65e5\u671f securities: \u8bc1\u5238\u5217\u8868, \u5143\u7d20\u4e3a\u5143\u7ec4\uff0c\u5206\u522b\u4e3a\u4ee3\u7801\u3001\u522b\u540d\u3001\u540d\u79f0\u3001IPO\u65e5\u671f\u3001\u9000\u5e02\u65e5\u548c\u8bc1\u5238\u7c7b\u578b \"\"\" # stock: {'index', 'stock'} # funds: {'fjb', 'mmf', 'reits', 'fja', 'fjm'} # {'etf', 'lof'} key = \"security:all\" pipeline = cache . security . pipeline () pipeline . delete ( key ) for code , alias , name , start , end , _type in securities : pipeline . rpush ( key , f \" { code } , { alias } , { name } , { start } ,\" f \" { end } , { _type } \" ) await pipeline . execute () logger . info ( \"all securities saved to cache %s , %d secs\" , key , len ( securities )) # update latest date info await cache . security . set ( \"security:latest_date\" , dt . strftime ( \"%Y-%m- %d \" ))","title":"update_secs_cache()"},{"location":"api/stock/","text":"Stock ( Security ) \u00b6 Stock\u5bf9\u8c61\u7528\u4e8e\u5f52\u96c6\u67d0\u652f\u8bc1\u5238\uff08\u80a1\u7968\u548c\u6307\u6570\uff0c\u4e0d\u5305\u62ec\u5176\u5b83\u6295\u8d44\u54c1\u79cd\uff09\u7684\u76f8\u5173\u4fe1\u606f\uff0c\u6bd4\u5982\u884c\u60c5\u6570\u636e\uff08OHLC\u7b49\uff09\u3001\u5e02\u503c\u6570\u636e\u3001\u6240\u5c5e\u6982\u5ff5\u5206\u7c7b\u7b49\u3002 Source code in omicron/models/stock.py class Stock ( Security ): \"\"\" Stock\u5bf9\u8c61\u7528\u4e8e\u5f52\u96c6\u67d0\u652f\u8bc1\u5238\uff08\u80a1\u7968\u548c\u6307\u6570\uff0c\u4e0d\u5305\u62ec\u5176\u5b83\u6295\u8d44\u54c1\u79cd\uff09\u7684\u76f8\u5173\u4fe1\u606f\uff0c\u6bd4\u5982\u884c\u60c5\u6570\u636e\uff08OHLC\u7b49\uff09\u3001\u5e02\u503c\u6570\u636e\u3001\u6240\u5c5e\u6982\u5ff5\u5206\u7c7b\u7b49\u3002 \"\"\" _is_cache_empty = True def __init__ ( self , code : str ): self . _code = code self . _stock = self . get_stock ( code ) assert self . _stock , \"\u7cfb\u7edf\u4e2d\u4e0d\u5b58\u5728\u8be5code\" ( _ , self . 
_display_name , self . _name , ipo , end , _type ) = self . _stock self . _start_date = convert_nptime_to_datetime ( ipo ) . date () self . _end_date = convert_nptime_to_datetime ( end ) . date () self . _type = SecurityType ( _type ) @classmethod def choose_listed ( cls , dt : datetime . date , types : List [ str ] = [ \"stock\" , \"index\" ]): cond = np . array ([ False ] * len ( cls . _stocks )) dt = datetime . datetime . combine ( dt , datetime . time ()) for type_ in types : cond |= cls . _stocks [ \"type\" ] == type_ result = cls . _stocks [ cond ] result = result [ result [ \"end\" ] > dt ] result = result [ result [ \"ipo\" ] <= dt ] # result = np.array(result, dtype=cls.stock_info_dtype) return result [ \"code\" ] . tolist () @classmethod def fuzzy_match ( cls , query : str ) -> Dict [ str , Tuple ]: \"\"\"\u5bf9\u80a1\u7968/\u6307\u6570\u8fdb\u884c\u6a21\u7cca\u5339\u914d\u67e5\u627e query\u53ef\u4ee5\u662f\u80a1\u7968/\u6307\u6570\u4ee3\u7801\uff0c\u4e5f\u53ef\u4ee5\u662f\u5b57\u6bcd\uff08\u6309name\u67e5\u627e\uff09\uff0c\u4e5f\u53ef\u4ee5\u662f\u6c49\u5b57\uff08\u6309\u663e\u793a\u540d\u67e5\u627e\uff09 Args: query (str): \u67e5\u8be2\u5b57\u7b26\u4e32 Returns: Dict[str, Tuple]: \u67e5\u8be2\u7ed3\u679c\uff0c\u5176\u4e2dTuple\u4e3a(code, display_name, name, start, end, type) \"\"\" query = query . upper () if re . match ( r \"\\d+\" , query ): return { sec [ \"code\" ]: sec . tolist () for sec in cls . _stocks if sec [ \"code\" ] . startswith ( query ) } elif re . match ( r \"[A-Z]+\" , query ): return { sec [ \"code\" ]: sec . tolist () for sec in cls . _stocks if sec [ \"name\" ] . startswith ( query ) } else : return { sec [ \"code\" ]: sec . tolist () for sec in cls . _stocks if sec [ \"alias\" ] . find ( query ) != - 1 } def __str__ ( self ): return f \" { self . display_name } [ { self . code } ]\" @property def ipo_date ( self ) -> datetime . date : return self . _start_date @property def display_name ( self ) -> str : return self . _display_name @property def name ( self ) -> str : return self . _name @property def end_date ( self ) -> datetime . date : return self . _end_date @property def code ( self ) -> str : return self . _code @property def sim_code ( self ) -> str : return re . sub ( r \"\\.XSH[EG]\" , \"\" , self . code ) @property def security_type ( self ) -> SecurityType : \"\"\"\u8fd4\u56de\u8bc1\u5238\u7c7b\u578b Returns: SecurityType: [description] \"\"\" return self . _type @staticmethod def simplify_code ( code ) -> str : return re . 
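A short, hedged sketch of the `Stock` object and the `fuzzy_match`/`choose_listed` class methods documented above. It assumes the security list has already been loaded into memory (via `omicron.init()`, as in the earlier Security example).

```python
import datetime

from omicron.models.stock import Stock

# Construction is synchronous once the security list is cached in memory.
stock = Stock("000001.XSHE")
print(stock)  # "<display_name>[000001.XSHE]" per __str__
print(stock.display_name, stock.ipo_date, stock.end_date, stock.security_type)

# fuzzy_match() accepts a code prefix, a letter (name) prefix, or a substring of
# the display name; it returns {code: (code, display_name, name, ipo, end, type)}.
print(Stock.fuzzy_match("00000"))   # by code prefix
print(Stock.fuzzy_match("PAYH"))    # by name prefix
print(Stock.fuzzy_match("平安"))     # by display-name substring

# choose_listed() filters securities of the given types that were already listed
# (and not yet delisted) on the given date, returning a list of codes.
codes = Stock.choose_listed(datetime.date(2023, 10, 13), ["stock"])
print(len(codes))
```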
sub ( r \"\\.XSH[EG]\" , \"\" , code ) @staticmethod def format_code ( code ) -> str : \"\"\"\u65b0\u4e09\u677f\u548c\u5317\u4ea4\u6240\u7684\u80a1\u7968, \u6682\u4e0d\u652f\u6301, \u9ed8\u8ba4\u8fd4\u56deNone \u4e0a\u8bc1A\u80a1: 600\u3001601\u3001603\u3001605 \u6df1\u8bc1A\u80a1: 000\u3001001 \u4e2d\u5c0f\u677f: 002\u3001003 \u521b\u4e1a\u677f: 300/301 \u79d1\u521b\u677f: 688 \u65b0\u4e09\u677f: 82\u300183\u300187\u300188\u3001430\u3001420\u3001400 \u5317\u4ea4\u6240: 43\u300183\u300187\u300188 \"\"\" if not code or len ( code ) != 6 : return None prefix = code [ 0 ] if prefix in ( \"0\" , \"3\" ): return f \" { code } .XSHE\" elif prefix == \"6\" : return f \" { code } .XSHG\" else : return None def days_since_ipo ( self ) -> int : \"\"\"\u83b7\u53d6\u4e0a\u5e02\u4ee5\u6765\u7ecf\u8fc7\u4e86\u591a\u5c11\u4e2a\u4ea4\u6613\u65e5 \u7531\u4e8e\u53d7\u4ea4\u6613\u65e5\u5386\u9650\u5236\uff082005\u5e741\u67084\u65e5\u4e4b\u524d\u7684\u4ea4\u6613\u65e5\u5386\u6ca1\u6709\uff09\uff0c\u5bf9\u4e8e\u5728\u4e4b\u524d\u4e0a\u5e02\u7684\u54c1\u79cd\uff0c\u90fd\u8fd4\u56de\u4ece2005\u5e741\u67084\u65e5\u8d77\u7684\u65e5\u671f\u3002 Returns: int: [description] \"\"\" epoch_start = arrow . get ( \"2005-01-04\" ) . date () ipo_day = self . ipo_date if self . ipo_date > epoch_start else epoch_start return tf . count_day_frames ( ipo_day , arrow . now () . date ()) @staticmethod def qfq ( bars : BarsArray ) -> BarsArray : \"\"\"\u5bf9\u884c\u60c5\u6570\u636e\u6267\u884c\u524d\u590d\u6743\u64cd\u4f5c\"\"\" # todo: \u8fd9\u91cc\u53ef\u4ee5\u4f18\u5316 if bars . size == 0 : return bars last = bars [ - 1 ][ \"factor\" ] for field in [ \"open\" , \"high\" , \"low\" , \"close\" , \"volume\" ]: bars [ field ] = bars [ field ] * ( bars [ \"factor\" ] / last ) return bars @classmethod async def batch_get_min_level_bars_in_range ( cls , codes : List [ str ], frame_type : FrameType , start : Frame , end : Frame , fq : bool = True , ) -> Generator [ Dict [ str , BarsArray ], None , None ]: \"\"\"\u83b7\u53d6\u591a\u652f\u80a1\u7968\uff08\u6307\u6570\uff09\u5728[start, end)\u65f6\u95f4\u6bb5\u5185\u7684\u884c\u60c5\u6570\u636e \u5982\u679c\u8981\u83b7\u53d6\u7684\u884c\u60c5\u6570\u636e\u662f\u5206\u949f\u7ea7\u522b\uff08\u53731m, 5m, 15m, 30m\u548c60m)\uff0c\u4f7f\u7528\u672c\u63a5\u53e3\u3002 \u505c\u724c\u6570\u636e\u5904\u7406\u8bf7\u89c1[get_bars][omicron.models.stock.Stock.get_bars]\u3002 \u672c\u51fd\u6570\u8fd4\u56de\u4e00\u4e2a\u8fed\u4ee3\u5668\uff0c\u4f7f\u7528\u65b9\u6cd5\u793a\u4f8b\uff1a ``` async for code, bars in Stock.batch_get_min_level_bars_in_range(...): print(code, bars) ``` \u5982\u679c`end`\u4e0d\u5728`frame_type`\u6240\u5c5e\u7684\u8fb9\u754c\u70b9\u4e0a\uff0c\u90a3\u4e48\uff0c\u5982\u679c`end`\u5927\u4e8e\u7b49\u4e8e\u5f53\u524d\u7f13\u5b58\u672a\u6536\u76d8\u6570\u636e\u65f6\u95f4\uff0c\u5219\u5c06\u5305\u542b\u672a\u6536\u76d8\u6570\u636e\uff1b\u5426\u5219\uff0c\u8fd4\u56de\u7684\u8bb0\u5f55\u5c06\u622a\u6b62\u5230`tf.floor(end, frame_type)`\u3002 Args: codes: \u80a1\u7968/\u6307\u6570\u4ee3\u7801\u5217\u8868 frame_type: \u5e27\u7c7b\u578b start: \u8d77\u59cb\u65f6\u95f4 end: \u7ed3\u675f\u65f6\u95f4\u3002\u5982\u679c\u672a\u6307\u660e\uff0c\u5219\u53d6\u5f53\u524d\u65f6\u95f4\u3002 fq: \u662f\u5426\u8fdb\u884c\u590d\u6743\uff0c\u5982\u679c\u662f\uff0c\u5219\u8fdb\u884c\u524d\u590d\u6743\u3002Defaults to True. 
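The code-formatting helpers just documented are plain static/instance methods; the expected outputs in the comments below follow the prefix rules and the 2005-01-04 epoch described in the docstrings.

```python
from omicron.models.stock import Stock

print(Stock.format_code("600000"))   # "600000.XSHG" -- Shanghai main board ("6" prefix)
print(Stock.format_code("300750"))   # "300750.XSHE" -- ChiNext ("3" prefix)
print(Stock.format_code("830799"))   # None -- NEEQ/BSE codes are not supported yet
print(Stock.simplify_code("000001.XSHE"))  # "000001"

# days_since_ipo() counts trading days since listing, clamped to 2005-01-04
# because the trading calendar starts there.
print(Stock("000001.XSHE").days_since_ipo())
```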
Returns: Generator[Dict[str, BarsArray], None, None]: \u8fed\u4ee3\u5668\uff0c\u6bcf\u6b21\u8fd4\u56de\u4e00\u4e2a\u5b57\u5178\uff0c\u5176\u4e2dkey\u4e3a\u4ee3\u7801\uff0cvalue\u4e3a\u884c\u60c5\u6570\u636e \"\"\" closed_end = tf . floor ( end , frame_type ) n = tf . count_frames ( start , closed_end , frame_type ) max_query_size = min ( cfg . influxdb . max_query_size , INFLUXDB_MAX_QUERY_SIZE ) batch_size = max ( 1 , max_query_size // n ) ff = tf . first_min_frame ( datetime . datetime . now (), frame_type ) for i in range ( 0 , len ( codes ), batch_size ): batch_codes = codes [ i : i + batch_size ] if end < ff : part1 = await cls . _batch_get_persisted_bars_in_range ( batch_codes , frame_type , start , end ) part2 = pd . DataFrame ([], columns = bars_dtype_with_code . names ) elif start >= ff : part1 = pd . DataFrame ([], columns = bars_dtype_with_code . names ) n = tf . count_frames ( start , closed_end , frame_type ) + 1 cached = await cls . _batch_get_cached_bars_n ( frame_type , n , end , batch_codes ) cached = cached [ cached [ \"frame\" ] >= start ] part2 = pd . DataFrame ( cached , columns = bars_dtype_with_code . names ) else : part1 = await cls . _batch_get_persisted_bars_in_range ( batch_codes , frame_type , start , ff ) n = tf . count_frames ( start , closed_end , frame_type ) + 1 cached = await cls . _batch_get_cached_bars_n ( frame_type , n , end , batch_codes ) part2 = pd . DataFrame ( cached , columns = bars_dtype_with_code . names ) df = pd . concat ([ part1 , part2 ]) for code in batch_codes : filtered = df [ df [ \"code\" ] == code ][ bars_cols ] bars = filtered . to_records ( index = False ) . astype ( bars_dtype ) if fq : bars = cls . qfq ( bars ) yield code , bars @classmethod async def batch_get_day_level_bars_in_range ( cls , codes : List [ str ], frame_type : FrameType , start : Frame , end : Frame , fq : bool = True , ) -> Generator [ Dict [ str , BarsArray ], None , None ]: \"\"\"\u83b7\u53d6\u591a\u652f\u80a1\u7968\uff08\u6307\u6570\uff09\u5728[start, end)\u65f6\u95f4\u6bb5\u5185\u7684\u884c\u60c5\u6570\u636e \u5982\u679c\u8981\u83b7\u53d6\u7684\u884c\u60c5\u6570\u636e\u662f\u65e5\u7ebf\u7ea7\u522b\uff08\u53731d, 1w, 1M)\uff0c\u4f7f\u7528\u672c\u63a5\u53e3\u3002 \u505c\u724c\u6570\u636e\u5904\u7406\u8bf7\u89c1[get_bars][omicron.models.stock.Stock.get_bars]\u3002 \u672c\u51fd\u6570\u8fd4\u56de\u4e00\u4e2a\u8fed\u4ee3\u5668\uff0c\u4f7f\u7528\u65b9\u6cd5\u793a\u4f8b\uff1a ``` async for code, bars in Stock.batch_get_day_level_bars_in_range(...): print(code, bars) ``` \u5982\u679c`end`\u4e0d\u5728`frame_type`\u6240\u5c5e\u7684\u8fb9\u754c\u70b9\u4e0a\uff0c\u90a3\u4e48\uff0c\u5982\u679c`end`\u5927\u4e8e\u7b49\u4e8e\u5f53\u524d\u7f13\u5b58\u672a\u6536\u76d8\u6570\u636e\u65f6\u95f4\uff0c\u5219\u5c06\u5305\u542b\u672a\u6536\u76d8\u6570\u636e\uff1b\u5426\u5219\uff0c\u8fd4\u56de\u7684\u8bb0\u5f55\u5c06\u622a\u6b62\u5230`tf.floor(end, frame_type)`\u3002 Args: codes: \u4ee3\u7801\u5217\u8868 frame_type: \u5e27\u7c7b\u578b start: \u8d77\u59cb\u65f6\u95f4 end: \u7ed3\u675f\u65f6\u95f4 fq: \u662f\u5426\u8fdb\u884c\u590d\u6743\uff0c\u5982\u679c\u662f\uff0c\u5219\u8fdb\u884c\u524d\u590d\u6743\u3002Defaults to True. Returns: Generator[Dict[str, BarsArray], None, None]: \u8fed\u4ee3\u5668\uff0c\u6bcf\u6b21\u8fd4\u56de\u4e00\u4e2a\u5b57\u5178\uff0c\u5176\u4e2dkey\u4e3a\u4ee3\u7801\uff0cvalue\u4e3a\u884c\u60c5\u6570\u636e \"\"\" today = datetime . datetime . now () . 
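Both batch interfaces above are async generators; the sketch below fleshes out the iteration pattern from their docstrings. The `FrameType` import path (`coretypes`) is an assumption based on the `coretypes.bars_dtype` references in the source, and the services behind omicron must be running for these calls to return data.

```python
import datetime

from coretypes import FrameType  # assumed import path
from omicron.models.stock import Stock


async def dump_minute_bars(codes):
    start = datetime.datetime(2023, 10, 9, 10, 0)
    end = datetime.datetime(2023, 10, 13, 15, 0)
    # minute-level frames (1m/5m/15m/30m/60m) go through the *_min_level_* API
    async for code, bars in Stock.batch_get_min_level_bars_in_range(
        codes, FrameType.MIN30, start, end
    ):
        print(code, len(bars), bars["frame"][-1])


async def dump_day_bars(codes):
    start = datetime.date(2023, 9, 1)
    end = datetime.date(2023, 10, 13)
    # day-level frames (1d/1w/1M) go through the *_day_level_* API
    async for code, bars in Stock.batch_get_day_level_bars_in_range(
        codes, FrameType.DAY, start, end
    ):
        print(code, len(bars), bars["close"][-1])
```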
date () # \u65e5\u7ebf\uff0cend\u4e0d\u7b49\u4e8e\u6700\u540e\u4ea4\u6613\u65e5\uff0c\u6b64\u65f6\u5df2\u65e0\u7f13\u5b58 if frame_type == FrameType . DAY and end == tf . floor ( today , frame_type ): from_cache = True elif frame_type != FrameType . DAY and start > tf . floor ( today , frame_type ): from_cache = True else : from_cache = False n = tf . count_frames ( start , end , frame_type ) max_query_size = min ( cfg . influxdb . max_query_size , INFLUXDB_MAX_QUERY_SIZE ) batch_size = max ( max_query_size // n , 1 ) for i in range ( 0 , len ( codes ), batch_size ): batch_codes = codes [ i : i + batch_size ] persisted = await cls . _batch_get_persisted_bars_in_range ( batch_codes , frame_type , start , end ) if from_cache : cached = await cls . _batch_get_cached_bars_n ( frame_type , 1 , end , batch_codes ) cached = pd . DataFrame ( cached , columns = bars_dtype_with_code . names ) df = pd . concat ([ persisted , cached ]) else : df = persisted for code in batch_codes : filtered = df [ df [ \"code\" ] == code ][ bars_cols ] bars = filtered . to_records ( index = False ) . astype ( bars_dtype ) if fq : bars = cls . qfq ( bars ) yield code , bars @classmethod async def get_bars_in_range ( cls , code : str , frame_type : FrameType , start : Frame , end : Frame = None , fq = True , unclosed = True , ) -> BarsArray : \"\"\"\u83b7\u53d6\u6307\u5b9a\u8bc1\u5238\uff08`code`\uff09\u5728[`start`, `end`]\u671f\u95f4\u5e27\u7c7b\u578b\u4e3a`frame_type`\u7684\u884c\u60c5\u6570\u636e\u3002 Args: code : \u8bc1\u5238\u4ee3\u7801 frame_type : \u884c\u60c5\u6570\u636e\u7684\u5e27\u7c7b\u578b start : \u8d77\u59cb\u65f6\u95f4 end : \u7ed3\u675f\u65f6\u95f4,\u5982\u679c\u4e3aNone\uff0c\u5219\u8868\u660e\u53d6\u5230\u5f53\u524d\u65f6\u95f4\u3002 fq : \u662f\u5426\u5bf9\u884c\u60c5\u6570\u636e\u6267\u884c\u524d\u590d\u6743\u64cd\u4f5c unclosed : \u662f\u5426\u5305\u542b\u672a\u6536\u76d8\u7684\u6570\u636e \"\"\" now = datetime . datetime . now () if frame_type in tf . day_level_frames : end = end or now . date () if unclosed and tf . day_shift ( end , 0 ) == now . date (): part2 = await cls . _get_cached_bars_n ( code , 1 , frame_type ) else : part2 = np . array ([], dtype = bars_dtype ) # get rest from persisted part1 = await cls . _get_persisted_bars_in_range ( code , frame_type , start , end ) bars = np . concatenate (( part1 , part2 )) else : end = end or now closed_end = tf . floor ( end , frame_type ) ff_min1 = tf . first_min_frame ( now , FrameType . MIN1 ) if tf . day_shift ( end , 0 ) < now . date () or end < ff_min1 : part1 = await cls . _get_persisted_bars_in_range ( code , frame_type , start , end ) part2 = np . array ([], dtype = bars_dtype ) elif start >= ff_min1 : # all in cache part1 = np . array ([], dtype = bars_dtype ) n = tf . count_frames ( start , closed_end , frame_type ) + 1 part2 = await cls . _get_cached_bars_n ( code , n , frame_type , end ) part2 = part2 [ part2 [ \"frame\" ] >= start ] else : # in both cache and persisted ff = tf . first_min_frame ( now , frame_type ) part1 = await cls . _get_persisted_bars_in_range ( code , frame_type , start , ff ) n = tf . count_frames ( ff , closed_end , frame_type ) + 1 part2 = await cls . _get_cached_bars_n ( code , n , frame_type , end ) if not unclosed : part2 = part2 [ part2 [ \"frame\" ] <= closed_end ] bars = np . concatenate (( part1 , part2 )) if fq : return cls . 
qfq ( bars ) else : return bars @classmethod async def get_bars ( cls , code : str , n : int , frame_type : FrameType , end : Frame = None , fq = True , unclosed = True , ) -> BarsArray : \"\"\"\u83b7\u53d6\u5230`end`\u4e3a\u6b62\u7684`n`\u4e2a\u884c\u60c5\u6570\u636e\u3002 \u8fd4\u56de\u7684\u6570\u636e\u662f\u6309\u7167\u65f6\u95f4\u987a\u5e8f\u9012\u589e\u6392\u5e8f\u7684\u3002\u5728\u9047\u5230\u505c\u724c\u7684\u60c5\u51b5\u65f6\uff0c\u8be5\u65f6\u6bb5\u6570\u636e\u5c06\u88ab\u8df3\u8fc7\uff0c\u56e0\u6b64\u8fd4\u56de\u7684\u8bb0\u5f55\u53ef\u80fd\u4e0d\u662f\u4ea4\u6613\u65e5\u8fde\u7eed\u7684\uff0c\u5e76\u4e14\u53ef\u80fd\u4e0d\u8db3`n`\u4e2a\u3002 \u5982\u679c\u7cfb\u7edf\u5f53\u524d\u6ca1\u6709\u5230\u6307\u5b9a\u65f6\u95f4`end`\u7684\u6570\u636e\uff0c\u5c06\u5c3d\u6700\u5927\u52aa\u529b\u8fd4\u56de\u6570\u636e\u3002\u8c03\u7528\u8005\u53ef\u4ee5\u901a\u8fc7\u5224\u65ad\u6700\u540e\u4e00\u6761\u6570\u636e\u7684\u65f6\u95f4\u662f\u5426\u7b49\u4e8e`end`\u6765\u5224\u65ad\u662f\u5426\u83b7\u53d6\u5230\u4e86\u5168\u90e8\u6570\u636e\u3002 Args: code: \u8bc1\u5238\u4ee3\u7801 n: \u8bb0\u5f55\u6570 frame_type: \u5e27\u7c7b\u578b end: \u622a\u6b62\u65f6\u95f4,\u5982\u679c\u672a\u6307\u660e\uff0c\u5219\u53d6\u5f53\u524d\u65f6\u95f4 fq: \u662f\u5426\u5bf9\u8fd4\u56de\u8bb0\u5f55\u8fdb\u884c\u590d\u6743\u3002\u5982\u679c\u4e3a`True`\u7684\u8bdd\uff0c\u5219\u8fdb\u884c\u524d\u590d\u6743\u3002Defaults to True. unclosed: \u662f\u5426\u5305\u542b\u6700\u65b0\u672a\u6536\u76d8\u7684\u6570\u636e\uff1f Defaults to True. Returns: \u8fd4\u56dedtype\u4e3a`coretypes.bars_dtype`\u7684\u4e00\u7ef4numpy\u6570\u7ec4\u3002 \"\"\" now = datetime . datetime . now () try : cached = np . array ([], dtype = bars_dtype ) if frame_type in tf . day_level_frames : if end is None : end = now . date () elif type ( end ) == datetime . datetime : end = end . date () n0 = n if unclosed : cached = await cls . _get_cached_bars_n ( code , 1 , frame_type ) if cached . size > 0 : # \u5982\u679c\u7f13\u5b58\u7684\u672a\u6536\u76d8\u65e5\u671f > end\uff0c\u5219\u8be5\u7f13\u5b58\u4e0d\u662f\u9700\u8981\u7684 if cached [ 0 ][ \"frame\" ] . item () . date () > end : cached = np . array ([], dtype = bars_dtype ) else : n0 = n - 1 else : end = end or now closed_frame = tf . floor ( end , frame_type ) # fetch one more bar, in case we should discard unclosed bar cached = await cls . _get_cached_bars_n ( code , n + 1 , frame_type , end ) if not unclosed : cached = cached [ cached [ \"frame\" ] <= closed_frame ] # n bars we need fetch from persisted db n0 = n - cached . size if n0 > 0 : if cached . size > 0 : end0 = cached [ 0 ][ \"frame\" ] . item () else : end0 = end bars = await cls . _get_persisted_bars_n ( code , frame_type , n0 , end0 ) merged = np . concatenate (( bars , cached )) bars = merged [ - n :] else : bars = cached [ - n :] if fq : bars = cls . qfq ( bars ) return bars except Exception as e : logger . exception ( e ) logger . 
warning ( \"failed to get bars for %s , %s , %s , %s \" , code , n , frame_type , end ) raise @classmethod async def _get_persisted_bars_in_range ( cls , code : str , frame_type : FrameType , start : Frame , end : Frame = None ) -> BarsArray : \"\"\"\u4ece\u6301\u4e45\u5316\u6570\u636e\u5e93\u4e2d\u83b7\u53d6\u4ecb\u4e8e[`start`, `end`]\u95f4\u7684\u884c\u60c5\u8bb0\u5f55 \u5982\u679c`start`\u5230`end`\u533a\u95f4\u67d0\u652f\u80a1\u7968\u505c\u724c\uff0c\u5219\u4f1a\u8fd4\u56de\u7a7a\u6570\u7ec4\u3002 Args: code: \u8bc1\u5238\u4ee3\u7801 frame_type: \u5e27\u7c7b\u578b start: \u8d77\u59cb\u65f6\u95f4 end: \u7ed3\u675f\u65f6\u95f4\uff0c\u5982\u679c\u672a\u6307\u660e\uff0c\u5219\u53d6\u5f53\u524d\u65f6\u95f4 Returns: \u8fd4\u56dedtype\u4e3a`coretypes.bars_dtype`\u7684\u4e00\u7ef4numpy\u6570\u7ec4\u3002 \"\"\" end = end or datetime . datetime . now () keep_cols = [ \"_time\" ] + list ( bars_cols [ 1 :]) measurement = cls . _measurement_name ( frame_type ) flux = ( Flux () . bucket ( cfg . influxdb . bucket_name ) . range ( start , end ) . measurement ( measurement ) . fields ( keep_cols ) . tags ({ \"code\" : code }) ) serializer = DataframeDeserializer ( encoding = \"utf-8\" , names = [ \"_\" , \"table\" , \"result\" , \"frame\" , \"code\" , \"amount\" , \"close\" , \"factor\" , \"high\" , \"low\" , \"open\" , \"volume\" , ], engine = \"c\" , skiprows = 0 , header = 0 , usecols = bars_cols , parse_dates = [ \"frame\" ], ) client = get_influx_client () result = await client . query ( flux , serializer ) return result . to_records ( index = False ) . astype ( bars_dtype ) @classmethod async def _get_persisted_bars_n ( cls , code : str , frame_type : FrameType , n : int , end : Frame = None ) -> BarsArray : \"\"\"\u4ece\u6301\u4e45\u5316\u6570\u636e\u5e93\u4e2d\u83b7\u53d6\u622a\u6b62\u5230`end`\u7684`n`\u6761\u884c\u60c5\u8bb0\u5f55 \u5982\u679c`end`\u672a\u6307\u5b9a\uff0c\u5219\u53d6\u5f53\u524d\u65f6\u95f4\u3002 \u57fa\u4e8einfluxdb\u67e5\u8be2\u7684\u7279\u6027\uff0c\u5728\u67e5\u8be2\u524d\uff0c\u5fc5\u987b\u5148\u6839\u636e`end`\u548c`n`\u8ba1\u7b97\u51fa\u8d77\u59cb\u65f6\u95f4\uff0c\u4f46\u5982\u679c\u5728\u6b64\u671f\u95f4\u67d0\u4e9b\u80a1\u7968\u6709\u505c\u724c\uff0c\u5219\u65e0\u6cd5\u8fd4\u56de\u7684\u6570\u636e\u5c06\u5c0f\u4e8e`n`\u3002\u800c\u5982\u679c\u8d77\u59cb\u65f6\u95f4\u8bbe\u7f6e\u5f97\u8db3\u591f\u65e9\uff0c\u867d\u7136\u80fd\u6ee1\u8db3\u8fd4\u56de\u6570\u636e\u6761\u6570\u7684\u8981\u6c42\uff0c\u4f46\u4f1a\u5e26\u6765\u6027\u80fd\u4e0a\u7684\u635f\u5931\u3002\u56e0\u6b64\uff0c\u6211\u4eec\u5728\u8ba1\u7b97\u8d77\u59cb\u65f6\u95f4\u65f6\uff0c\u4e0d\u662f\u4f7f\u7528`n`\u6765\u8ba1\u7b97\uff0c\u800c\u662f\u4f7f\u7528\u4e86`min(n * 2, n + 20)`\u6765\u8ba1\u7b97\u8d77\u59cb\u65f6\u95f4\uff0c\u8fd9\u6837\u591a\u6570\u60c5\u51b5\u4e0b\uff0c\u80fd\u591f\u4fdd\u8bc1\u8fd4\u56de\u6570\u636e\u7684\u6761\u6570\u4e3a`n`\u6761\u3002 \u8fd4\u56de\u7684\u6570\u636e\u6309`frame`\u8fdb\u884c\u5347\u5e8f\u6392\u5217\u3002 Args: code: \u8bc1\u5238\u4ee3\u7801 frame_type: \u5e27\u7c7b\u578b n: \u8fd4\u56de\u7ed3\u679c\u6570\u91cf end: \u7ed3\u675f\u65f6\u95f4\uff0c\u5982\u679c\u672a\u6307\u660e\uff0c\u5219\u53d6\u5f53\u524d\u65f6\u95f4 Returns: \u8fd4\u56dedtype\u4e3a`bars_dtype`\u7684numpy\u6570\u7ec4 \"\"\" # check is needed since tags accept List as well assert isinstance ( code , str ), \"`code` must be a string\" end = end or datetime . datetime . now () closed_end = tf . floor ( end , frame_type ) start = tf . 
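A sketch of the two single-security fetch APIs documented above (`get_bars` and `get_bars_in_range`). Both are coroutines; the `FrameType` import path is assumed as in the previous example, and forward adjustment (`fq=True`) is the default.

```python
import datetime

from coretypes import FrameType  # assumed import path
from omicron.models.stock import Stock


async def fetch_bars():
    # the most recent 120 thirty-minute bars, including the unclosed one
    bars = await Stock.get_bars("000001.XSHE", 120, FrameType.MIN30)
    print(bars[-1]["frame"], bars[-1]["close"])

    # daily bars over a date range, excluding today's unclosed bar
    day_bars = await Stock.get_bars_in_range(
        "000001.XSHE",
        FrameType.DAY,
        datetime.date(2023, 1, 1),
        end=datetime.date(2023, 10, 13),
        fq=True,
        unclosed=False,
    )
    print(len(day_bars))
```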
shift ( closed_end , - min ( 2 * n , n + 20 ), frame_type ) keep_cols = [ \"_time\" ] + list ( bars_cols [ 1 :]) measurement = cls . _measurement_name ( frame_type ) flux = ( Flux () . bucket ( cfg . influxdb . bucket_name ) . range ( start , end ) . measurement ( measurement ) . fields ( keep_cols ) . tags ({ \"code\" : code }) . latest ( n ) ) serializer = DataframeDeserializer ( encoding = \"utf-8\" , names = [ \"_\" , \"table\" , \"result\" , \"frame\" , \"code\" , \"amount\" , \"close\" , \"factor\" , \"high\" , \"low\" , \"open\" , \"volume\" , ], engine = \"c\" , skiprows = 0 , header = 0 , usecols = bars_cols , parse_dates = [ \"frame\" ], ) client = get_influx_client () result = await client . query ( flux , serializer ) return result . to_records ( index = False ) . astype ( bars_dtype ) @classmethod async def _batch_get_persisted_bars_n ( cls , codes : List [ str ], frame_type : FrameType , n : int , end : Frame = None ) -> pd . DataFrame : \"\"\"\u4ece\u6301\u4e45\u5316\u5b58\u50a8\u4e2d\u83b7\u53d6`codes`\u6307\u5b9a\u7684\u4e00\u6279\u80a1\u7968\u622a\u6b62`end`\u65f6\u7684`n`\u6761\u8bb0\u5f55\u3002 \u8fd4\u56de\u7684\u6570\u636e\u6309`frame`\u8fdb\u884c\u5347\u5e8f\u6392\u5217\u3002\u5982\u679c\u4e0d\u5b58\u5728\u6ee1\u8db3\u6307\u5b9a\u6761\u4ef6\u7684\u67e5\u8be2\u7ed3\u679c\uff0c\u5c06\u8fd4\u56de\u7a7a\u7684DataFrame\u3002 \u57fa\u4e8einfluxdb\u67e5\u8be2\u7684\u7279\u6027\uff0c\u5728\u67e5\u8be2\u524d\uff0c\u5fc5\u987b\u5148\u6839\u636e`end`\u548c`n`\u8ba1\u7b97\u51fa\u8d77\u59cb\u65f6\u95f4\uff0c\u4f46\u5982\u679c\u5728\u6b64\u671f\u95f4\u67d0\u4e9b\u80a1\u7968\u6709\u505c\u724c\uff0c\u5219\u65e0\u6cd5\u8fd4\u56de\u7684\u6570\u636e\u5c06\u5c0f\u4e8e`n`\u3002\u5982\u679c\u8d77\u59cb\u65f6\u95f4\u8bbe\u7f6e\u7684\u8db3\u591f\u65e9\uff0c\u867d\u7136\u80fd\u6ee1\u8db3\u8fd4\u56de\u6570\u636e\u6761\u6570\u7684\u8981\u6c42\uff0c\u4f46\u4f1a\u5e26\u6765\u6027\u80fd\u4e0a\u7684\u635f\u5931\u3002\u56e0\u6b64\uff0c\u6211\u4eec\u5728\u8ba1\u7b97\u8d77\u59cb\u65f6\u95f4\u65f6\uff0c\u4e0d\u662f\u4f7f\u7528`n`\u6765\u8ba1\u7b97\uff0c\u800c\u662f\u4f7f\u7528\u4e86`min(n * 2, n + 20)`\u6765\u8ba1\u7b97\u8d77\u59cb\u65f6\u95f4\uff0c\u8fd9\u6837\u591a\u6570\u60c5\u51b5\u4e0b\uff0c\u80fd\u591f\u4fdd\u8bc1\u8fd4\u56de\u6570\u636e\u7684\u6761\u6570\u4e3a`n`\u6761\u3002 Args: codes: \u8bc1\u5238\u4ee3\u7801\u5217\u8868\u3002 frame_type: \u5e27\u7c7b\u578b n: \u8fd4\u56de\u7ed3\u679c\u6570\u91cf end: \u7ed3\u675f\u65f6\u95f4\uff0c\u5982\u679c\u672a\u6307\u5b9a\uff0c\u5219\u4f7f\u7528\u5f53\u524d\u65f6\u95f4 Returns: DataFrame, columns\u4e3a`code`, `frame`, `open`, `high`, `low`, `close`, `volume`, `amount`, `factor` \"\"\" max_query_size = min ( cfg . influxdb . max_query_size , INFLUXDB_MAX_QUERY_SIZE ) if len ( codes ) * min ( n + 20 , 2 * n ) > max_query_size : raise BadParameterError ( f \"codes\u7684\u6570\u91cf\u548cn\u7684\u4e58\u79ef\u8d85\u8fc7\u4e86influxdb\u7684\u6700\u5927\u67e5\u8be2\u6570\u91cf\u9650\u5236 { max_query_size } \" ) end = end or datetime . datetime . now () close_end = tf . floor ( end , frame_type ) begin = tf . 
shift ( close_end , - 1 * min ( n + 20 , n * 2 ), frame_type ) # influxdb\u7684\u67e5\u8be2\u7ed3\u679c\u683c\u5f0f\u7c7b\u4f3c\u4e8eCSV\uff0c\u5176\u5217\u987a\u5e8f\u4e3a_, result_alias, table_seq, _time, tags, fields,\u5176\u4e2dtags\u548cfields\u90fd\u662f\u5347\u5e8f\u6392\u5217 keep_cols = [ \"code\" ] + list ( bars_cols ) names = [ \"_\" , \"result\" , \"table\" , \"frame\" , \"code\" ] # influxdb will return fields in the order of name ascending parallel names . extend ( sorted ( bars_cols [ 1 :])) measurement = cls . _measurement_name ( frame_type ) flux = ( Flux () . bucket ( cfg . influxdb . bucket_name ) . range ( begin , end ) . measurement ( measurement ) . fields ( keep_cols ) . latest ( n ) ) if codes is not None : assert isinstance ( codes , list ), \"`codes` must be a list or None\" flux . tags ({ \"code\" : codes }) deserializer = DataframeDeserializer ( names = names , usecols = keep_cols , encoding = \"utf-8\" , time_col = \"frame\" , engine = \"c\" , ) client = get_influx_client () return await client . query ( flux , deserializer ) @classmethod async def _batch_get_persisted_bars_in_range ( cls , codes : List [ str ], frame_type : FrameType , begin : Frame , end : Frame = None ) -> pd . DataFrame : \"\"\"\u4ece\u6301\u4e45\u5316\u5b58\u50a8\u4e2d\u83b7\u53d6`codes`\u6307\u5b9a\u7684\u4e00\u6279\u80a1\u7968\u5728`begin`\u548c`end`\u4e4b\u95f4\u7684\u8bb0\u5f55\u3002 \u8fd4\u56de\u7684\u6570\u636e\u5c06\u6309`frame`\u8fdb\u884c\u5347\u5e8f\u6392\u5217\u3002 \u6ce8\u610f\uff0c\u8fd4\u56de\u7684\u6570\u636e\u6709\u53ef\u80fd\u4e0d\u662f\u7b49\u957f\u7684\uff0c\u56e0\u4e3a\u6709\u7684\u80a1\u7968\u53ef\u80fd\u505c\u724c\u3002 Args: codes: \u8bc1\u5238\u4ee3\u7801\u5217\u8868\u3002 frame_type: \u5e27\u7c7b\u578b begin: \u5f00\u59cb\u65f6\u95f4 end: \u7ed3\u675f\u65f6\u95f4 Returns: DataFrame, columns\u4e3a`code`, `frame`, `open`, `high`, `low`, `close`, `volume`, `amount`, `factor` \"\"\" end = end or datetime . datetime . now () n = tf . count_frames ( begin , end , frame_type ) max_query_size = min ( cfg . influxdb . max_query_size , INFLUXDB_MAX_QUERY_SIZE ) if len ( codes ) * n > max_query_size : raise BadParameterError ( f \"asked records is { len ( codes ) * n } , which is too large than { max_query_size } \" ) # influxdb\u7684\u67e5\u8be2\u7ed3\u679c\u683c\u5f0f\u7c7b\u4f3c\u4e8eCSV\uff0c\u5176\u5217\u987a\u5e8f\u4e3a_, result_alias, table_seq, _time, tags, fields,\u5176\u4e2dtags\u548cfields\u90fd\u662f\u5347\u5e8f\u6392\u5217 keep_cols = [ \"code\" ] + list ( bars_cols ) names = [ \"_\" , \"result\" , \"table\" , \"frame\" , \"code\" ] # influxdb will return fields in the order of name ascending parallel names . extend ( sorted ( bars_cols [ 1 :])) measurement = cls . _measurement_name ( frame_type ) flux = ( Flux () . bucket ( cfg . influxdb . bucket_name ) . range ( begin , end ) . measurement ( measurement ) . fields ( keep_cols ) ) flux . tags ({ \"code\" : codes }) deserializer = DataframeDeserializer ( names = names , usecols = keep_cols , encoding = \"utf-8\" , time_col = \"frame\" , engine = \"c\" , ) client = get_influx_client () df = await client . 
query ( flux , deserializer ) return df @classmethod async def batch_cache_bars ( cls , frame_type : FrameType , bars : Dict [ str , BarsArray ]): \"\"\"\u7f13\u5b58\u5df2\u6536\u76d8\u7684\u5206\u949f\u7ebf\u548c\u65e5\u7ebf \u5f53\u7f13\u5b58\u65e5\u7ebf\u65f6\uff0c\u4ec5\u9650\u4e8e\u5f53\u65e5\u6536\u76d8\u540e\u7684\u7b2c\u4e00\u6b21\u540c\u6b65\u65f6\u8c03\u7528\u3002 Args: frame_type: \u5e27\u7c7b\u578b bars: \u884c\u60c5\u6570\u636e\uff0c\u5176key\u4e3a\u80a1\u7968\u4ee3\u7801\uff0c\u5176value\u4e3adtype\u4e3a`bars_dtype`\u7684\u4e00\u7ef4numpy\u6570\u7ec4\u3002 Raises: RedisError: \u5982\u679c\u5728\u6267\u884c\u8fc7\u7a0b\u4e2d\u53d1\u751f\u9519\u8bef\uff0c\u5219\u629b\u51fa\u4ee5\u6b64\u5f02\u5e38\u4e3a\u57fa\u7c7b\u7684\u5404\u79cd\u5f02\u5e38\uff0c\u5177\u4f53\u53c2\u8003aioredis\u76f8\u5173\u6587\u6863\u3002 \"\"\" if frame_type == FrameType . DAY : await cls . batch_cache_unclosed_bars ( frame_type , bars ) return pl = cache . security . pipeline () for code , bars in bars . items (): key = f \"bars: { frame_type . value } : { code } \" for bar in bars : frame = tf . time2int ( bar [ \"frame\" ] . item ()) val = [ * bar ] val [ 0 ] = frame pl . hset ( key , frame , \",\" . join ( map ( str , val ))) await pl . execute () @classmethod async def batch_cache_unclosed_bars ( cls , frame_type : FrameType , bars : Dict [ str , BarsArray ] ): # pragma: no cover \"\"\"\u7f13\u5b58\u672a\u6536\u76d8\u76845\u300115\u300130\u300160\u5206\u949f\u7ebf\u53ca\u65e5\u7ebf\u3001\u5468\u7ebf\u3001\u6708\u7ebf Args: frame_type: \u5e27\u7c7b\u578b bars: \u884c\u60c5\u6570\u636e\uff0c\u5176key\u4e3a\u80a1\u7968\u4ee3\u7801\uff0c\u5176value\u4e3adtype\u4e3a`bars_dtype`\u7684\u4e00\u7ef4numpy\u6570\u7ec4\u3002bars\u4e0d\u80fd\u4e3aNone\uff0c\u6216\u8005empty\u3002 Raise: RedisError: \u5982\u679c\u5728\u6267\u884c\u8fc7\u7a0b\u4e2d\u53d1\u751f\u9519\u8bef\uff0c\u5219\u629b\u51fa\u4ee5\u6b64\u5f02\u5e38\u4e3a\u57fa\u7c7b\u7684\u5404\u79cd\u5f02\u5e38\uff0c\u5177\u4f53\u53c2\u8003aioredis\u76f8\u5173\u6587\u6863\u3002 \"\"\" pl = cache . security . pipeline () key = f \"bars: { frame_type . value } :unclosed\" convert = tf . time2int if frame_type in tf . minute_level_frames else tf . date2int for code , bar in bars . items (): val = [ * bar [ 0 ]] val [ 0 ] = convert ( bar [ \"frame\" ][ 0 ] . item ()) # \u65f6\u95f4\u8f6c\u6362 pl . hset ( key , code , \",\" . join ( map ( str , val ))) await pl . execute () @classmethod async def reset_cache ( cls ): \"\"\"\u6e05\u9664\u7f13\u5b58\u7684\u884c\u60c5\u6570\u636e\"\"\" try : for ft in itertools . chain ( tf . minute_level_frames , tf . day_level_frames ): keys = await cache . security . keys ( f \"bars: { ft . value } :*\" ) if keys : await cache . security . delete ( * keys ) finally : cls . _is_cache_empty = True @classmethod def _deserialize_cached_bars ( cls , raw : List [ str ], ft : FrameType ) -> BarsArray : \"\"\"\u4eceredis\u4e2d\u53cd\u5e8f\u5217\u5316\u7f13\u5b58\u7684\u6570\u636e \u5982\u679c`raw`\u7a7a\u6570\u7ec4\u6216\u8005\u5143\u7d20\u4e3a`None`\uff0c\u5219\u8fd4\u56de\u7a7a\u6570\u7ec4\u3002 Args: raw: redis\u4e2d\u7684\u7f13\u5b58\u6570\u636e ft: \u5e27\u7c7b\u578b sort: \u662f\u5426\u9700\u8981\u91cd\u65b0\u6392\u5e8f\uff0c\u7f3a\u7701\u4e3aFalse Returns: BarsArray: \u884c\u60c5\u6570\u636e \"\"\" fix_date = False if ft in tf . minute_level_frames : convert = tf . int2time else : convert = tf . 
int2date fix_date = True recs = [] # it's possible to treat raw as csv and use pandas to parse, however, the performance is 10 times worse than this method for raw_rec in raw : if raw_rec is None : continue f , o , h , l , c , v , m , fac = raw_rec . split ( \",\" ) if fix_date : f = f [: 8 ] recs . append ( ( convert ( f ), float ( o ), float ( h ), float ( l ), float ( c ), float ( v ), float ( m ), float ( fac ), ) ) return np . array ( recs , dtype = bars_dtype ) @classmethod async def _batch_get_cached_bars_n ( cls , frame_type : FrameType , n : int , end : Frame = None , codes : List [ str ] = None ) -> BarsPanel : \"\"\"\u6279\u91cf\u83b7\u53d6\u5728cache\u4e2d\u622a\u6b62`end`\u7684`n`\u4e2abars\u3002 \u5982\u679c`end`\u4e0d\u5728`frame_type`\u6240\u5c5e\u7684\u8fb9\u754c\u70b9\u4e0a\uff0c\u90a3\u4e48\uff0c\u5982\u679c`end`\u5927\u4e8e\u7b49\u4e8e\u5f53\u524d\u7f13\u5b58\u672a\u6536\u76d8\u6570\u636e\u65f6\u95f4\uff0c\u5219\u5c06\u5305\u542b\u672a\u6536\u76d8\u6570\u636e\uff1b\u5426\u5219\uff0c\u8fd4\u56de\u7684\u8bb0\u5f55\u5c06\u622a\u6b62\u5230`tf.floor(end, frame_type)`\u3002 Args: frame_type: \u65f6\u95f4\u5e27\u7c7b\u578b n: \u8fd4\u56de\u8bb0\u5f55\u6761\u6570 codes: \u8bc1\u5238\u4ee3\u7801\u5217\u8868 end: \u622a\u6b62\u65f6\u95f4, \u5982\u679c\u4e3aNone Returns: BarsPanel: \u884c\u60c5\u6570\u636e \"\"\" # \u8c03\u7528\u8005\u81ea\u5df1\u4fdd\u8bc1end\u5728\u7f13\u5b58\u4e2d cols = list ( bars_dtype_with_code . names ) if frame_type in tf . day_level_frames : key = f \"bars: { frame_type . value } :unclosed\" if codes is None : recs = await cache . security . hgetall ( key ) codes = list ( recs . keys ()) recs = recs . values () else : recs = await cache . security . hmget ( key , * codes ) barss = cls . _deserialize_cached_bars ( recs , frame_type ) if barss . size > 0 : if len ( barss ) != len ( codes ): # issue 39, \u5982\u679c\u67d0\u652f\u7968\u5f53\u5929\u505c\u724c\uff0c\u5219\u7f13\u5b58\u4e2d\u5c06\u4e0d\u4f1a\u6709\u5b83\u7684\u8bb0\u5f55\uff0c\u6b64\u65f6\u9700\u8981\u79fb\u9664\u5176\u4ee3\u7801 codes = [ codes [ i ] for i , item in enumerate ( recs ) if item is not None ] barss = numpy_append_fields ( barss , \"code\" , codes , [( \"code\" , \"O\" )]) return barss [ cols ] . astype ( bars_dtype_with_code ) else : return np . array ([], dtype = bars_dtype_with_code ) else : end = end or datetime . datetime . now () close_end = tf . floor ( end , frame_type ) all_bars = [] if codes is None : keys = await cache . security . keys ( f \"bars: { frame_type . value } :*[^unclosed]\" ) codes = [ key . split ( \":\" )[ - 1 ] for key in keys ] else : keys = [ f \"bars: { frame_type . value } : { code } \" for code in codes ] if frame_type != FrameType . MIN1 : unclosed = await cache . security . hgetall ( f \"bars: { frame_type . value } :unclosed\" ) else : unclosed = {} pl = cache . security . pipeline () frames = tf . get_frames_by_count ( close_end , n , frame_type ) for key in keys : pl . hmget ( key , * frames ) all_closed = await pl . execute () for code , raw in zip ( codes , all_closed ): raw . append ( unclosed . get ( code )) barss = cls . _deserialize_cached_bars ( raw , frame_type ) barss = numpy_append_fields ( barss , \"code\" , [ code ] * len ( barss ), [( \"code\" , \"O\" )] ) barss = barss [ cols ] . astype ( bars_dtype_with_code ) all_bars . append ( barss [ barss [ \"frame\" ] <= end ][ - n :]) try : return np . concatenate ( all_bars ) except ValueError as e : logger . exception ( e ) return np . 
array ([], dtype = bars_dtype_with_code ) @classmethod async def _get_cached_bars_n ( cls , code : str , n : int , frame_type : FrameType , end : Frame = None ) -> BarsArray : \"\"\"\u4ece\u7f13\u5b58\u4e2d\u83b7\u53d6\u6307\u5b9a\u4ee3\u7801\u7684\u884c\u60c5\u6570\u636e \u5b58\u53d6\u903b\u8f91\u662f\uff0c\u4ece`end`\u6307\u5b9a\u7684\u65f6\u95f4\u5411\u524d\u53d6`n`\u6761\u8bb0\u5f55\u3002`end`\u4e0d\u5e94\u8be5\u5927\u4e8e\u5f53\u524d\u7cfb\u7edf\u65f6\u95f4\uff0c\u5e76\u4e14\u6839\u636e`end`\u548c`n`\u8ba1\u7b97\u51fa\u6765\u7684\u8d77\u59cb\u65f6\u95f4\u5e94\u8be5\u5728\u7f13\u5b58\u4e2d\u5b58\u5728\u3002\u5426\u5219\uff0c\u4e24\u79cd\u60c5\u51b5\u4e0b\uff0c\u8fd4\u56de\u8bb0\u5f55\u6570\u90fd\u5c06\u5c0f\u4e8e`n`\u3002 \u5982\u679c`end`\u4e0d\u5904\u4e8e`frame_type`\u6240\u5c5e\u7684\u8fb9\u754c\u7ed3\u675f\u4f4d\u7f6e\uff0c\u4e14\u5c0f\u4e8e\u5f53\u524d\u5df2\u7f13\u5b58\u7684\u672a\u6536\u76d8bar\u65f6\u95f4\uff0c\u5219\u4f1a\u8fd4\u56de\u524d\u4e00\u4e2a\u5df2\u6536\u76d8\u7684\u6570\u636e\uff0c\u5426\u5219\uff0c\u8fd4\u56de\u7684\u8bb0\u5f55\u4e2d\u8fd8\u5c06\u5305\u542b\u672a\u6536\u76d8\u7684\u6570\u636e\u3002 args: code: \u8bc1\u5238\u4ee3\u7801\uff0c\u6bd4\u5982000001.XSHE n: \u8fd4\u56de\u8bb0\u5f55\u6761\u6570 frame_type: \u5e27\u7c7b\u578b end: \u7ed3\u675f\u5e27\uff0c\u5982\u679c\u4e3aNone\uff0c\u5219\u53d6\u5f53\u524d\u65f6\u95f4 returns: \u5143\u7d20\u7c7b\u578b\u4e3a`coretypes.bars_dtype`\u7684\u4e00\u7ef4numpy\u6570\u7ec4\u3002\u5982\u679c\u6ca1\u6709\u6570\u636e\uff0c\u5219\u8fd4\u56de\u7a7andarray\u3002 \"\"\" # 50 times faster than arrow.now().floor('second') end = end or datetime . datetime . now () . replace ( second = 0 , microsecond = 0 ) if frame_type in tf . minute_level_frames : cache_start = tf . first_min_frame ( end . date (), frame_type ) closed = tf . floor ( end , frame_type ) frames = ( tf . get_frames ( cache_start , closed , frame_type ))[ - n :] if len ( frames ) == 0 : recs = np . empty ( shape = ( 0 ,), dtype = bars_dtype ) else : key = f \"bars: { frame_type . value } : { code } \" recs = await cache . security . hmget ( key , * frames ) recs = cls . _deserialize_cached_bars ( recs , frame_type ) if closed < end : # for unclosed key = f \"bars: { frame_type . value } :unclosed\" unclosed = await cache . security . hget ( key , code ) unclosed = cls . _deserialize_cached_bars ([ unclosed ], frame_type ) if len ( unclosed ) == 0 : return recs [ - n :] if end < unclosed [ 0 ][ \"frame\" ] . item (): # \u5982\u679cunclosed\u4e3a9:36, \u8c03\u7528\u8005\u8981\u6c42\u53d69:29\u76845m\u6570\u636e\uff0c\u5219\u53d6\u5230\u7684unclosed\u4e0d\u5408\u8981\u6c42\uff0c\u629b\u5f03\u3002\u4f3c\u4e4e\u6ca1\u6709\u66f4\u597d\u7684\u65b9\u6cd5\u68c0\u6d4bend\u4e0eunclosed\u7684\u5173\u7cfb return recs [ - n :] else : bars = np . concatenate (( recs , unclosed )) return bars [ - n :] else : return recs [ - n :] else : # \u65e5\u7ebf\u53ca\u4ee5\u4e0a\u7ea7\u522b\uff0c\u4ec5\u5728\u7f13\u5b58\u4e2d\u5b58\u5728\u672a\u6536\u76d8\u6570\u636e key = f \"bars: { frame_type . value } :unclosed\" rec = await cache . security . hget ( key , code ) return cls . 
_deserialize_cached_bars ([ rec ], frame_type ) @classmethod async def cache_bars ( cls , code : str , frame_type : FrameType , bars : BarsArray ): \"\"\"\u5c06\u5f53\u671f\u5df2\u6536\u76d8\u7684\u884c\u60c5\u6570\u636e\u7f13\u5b58 Note: \u5f53\u524d\u53ea\u7f13\u5b581\u5206\u949f\u6570\u636e\u3002\u5176\u5b83\u5206\u949f\u6570\u636e\uff0c\u90fd\u5728\u8c03\u7528\u65f6\uff0c\u901a\u8fc7resample\u4e34\u65f6\u5408\u6210\u3002 \u884c\u60c5\u6570\u636e\u7f13\u5b58\u5728\u4ee5`bars:{frame_type.value}:{code}`\u4e3akey, {frame}\u4e3afield\u7684hashmap\u4e2d\u3002 Args: code: the full qualified code of a security or index frame_type: frame type of the bars bars: the bars to cache, which is a numpy array of dtype `coretypes.bars_dtype` Raises: RedisError: if redis operation failed, see documentation of aioredis \"\"\" # \u8f6c\u6362\u65f6\u95f4\u4e3aint convert = tf . time2int if frame_type in tf . minute_level_frames else tf . date2int key = f \"bars: { frame_type . value } : { code } \" pl = cache . security . pipeline () for bar in bars : val = [ * bar ] val [ 0 ] = convert ( bar [ \"frame\" ] . item ()) pl . hset ( key , val [ 0 ], \",\" . join ( map ( str , val ))) await pl . execute () @classmethod async def cache_unclosed_bars ( cls , code : str , frame_type : FrameType , bars : BarsArray ): # pragma: no cover \"\"\"\u5c06\u672a\u7ed3\u675f\u7684\u884c\u60c5\u6570\u636e\u7f13\u5b58 \u672a\u7ed3\u675f\u7684\u884c\u60c5\u6570\u636e\u7f13\u5b58\u5728\u4ee5`bars:{frame_type.value}:unclosed`\u4e3akey, {code}\u4e3afield\u7684hashmap\u4e2d\u3002 \u5c3d\u7ba1`bars`\u88ab\u58f0\u660e\u4e3aBarsArray\uff0c\u4f46\u5b9e\u9645\u4e0a\u5e94\u8be5\u53ea\u5305\u542b\u4e00\u4e2a\u5143\u7d20\u3002 Args: code: the full qualified code of a security or index frame_type: frame type of the bars bars: the bars to cache, which is a numpy array of dtype `coretypes.bars_dtype` Raises: RedisError: if redis operation failed, see documentation of aioredis \"\"\" converter = tf . time2int if frame_type in tf . minute_level_frames else tf . date2int assert len ( bars ) == 1 , \"unclosed bars should only have one record\" key = f \"bars: { frame_type . value } :unclosed\" bar = bars [ 0 ] val = [ * bar ] val [ 0 ] = converter ( bar [ \"frame\" ] . item ()) await cache . security . hset ( key , code , \",\" . join ( map ( str , val ))) @classmethod async def persist_bars ( cls , frame_type : FrameType , bars : Union [ Dict [ str , BarsArray ], BarsArray , pd . DataFrame ], ): \"\"\"\u5c06\u884c\u60c5\u6570\u636e\u6301\u4e45\u5316 \u5982\u679c`bars`\u7c7b\u578b\u4e3aDict,\u5219key\u4e3a`code`\uff0cvalue\u4e3a`bars`\u3002\u5982\u679c\u5176\u7c7b\u578b\u4e3aBarsArray\u6216\u8005pd.DataFrame\uff0c\u5219`bars`\u5404\u5217\u5b57\u6bb5\u5e94\u8be5\u4e3a`coretypes.bars_dtype` + (\"code\", \"O\")\u6784\u6210\u3002 Args: frame_type: the frame type of the bars bars: the bars to be persisted Raises: InfluxDBWriteError: if influxdb write failed \"\"\" client = get_influx_client () measurement = cls . _measurement_name ( frame_type ) logger . info ( \"persisting bars to influxdb: %s , %d secs\" , measurement , len ( bars )) if isinstance ( bars , dict ): for code , value in bars . items (): await client . save ( value , measurement , global_tags = { \"code\" : code }, time_key = \"frame\" ) else : await client . 
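`cache_bars` writes each closed bar into the hash `bars:{frame_type.value}:{code}` with the integer frame as the field and the comma-joined field values as the value; `cache_unclosed_bars` does the same into `bars:{frame_type.value}:unclosed`, keyed by code. A standalone sketch of that serialization; `time2int` below is a stand-in for `tf.time2int`, and the dtype is an assumed approximation of `coretypes.bars_dtype`:

```
import datetime
import numpy as np

BARS_DTYPE = np.dtype(
    [("frame", "datetime64[s]"), ("open", "f4"), ("high", "f4"), ("low", "f4"),
     ("close", "f4"), ("volume", "f8"), ("amount", "f8"), ("factor", "f4")]
)

def time2int(dt: datetime.datetime) -> int:
    """Stand-in for tf.time2int, e.g. 2023-01-03 09:31 -> 202301030931."""
    return int(dt.strftime("%Y%m%d%H%M"))

def serialize_bar(bar):
    """Return the (field, value) pair that cache_bars would hset for this bar."""
    val = [*bar]
    val[0] = time2int(bar["frame"].item())  # the hash field is the integer form of the frame
    return val[0], ",".join(map(str, val))

bar = np.array(
    [(datetime.datetime(2023, 1, 3, 9, 31), 10.0, 10.5, 9.8, 10.2, 1_000_000.0, 10_200_000.0, 1.23)],
    dtype=BARS_DTYPE,
)[0]
field, value = serialize_bar(bar)
print(field)   # e.g. 202301030931
print(value)   # e.g. 202301030931,10.0,10.5,9.8,10.2,1000000.0,10200000.0,1.23
```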
save ( bars , measurement , tag_keys = [ \"code\" ], time_key = \"frame\" ) @classmethod def resample ( cls , bars : BarsArray , from_frame : FrameType , to_frame : FrameType ) -> BarsArray : \"\"\"\u5c06\u539f\u6765\u4e3a`from_frame`\u7684\u884c\u60c5\u6570\u636e\u8f6c\u6362\u4e3a`to_frame`\u7684\u884c\u60c5\u6570\u636e \u5982\u679c`to_frame`\u4e3a\u65e5\u7ebf\u6216\u8005\u5206\u949f\u7ea7\u522b\u7ebf\uff0c\u5219`from_frame`\u5fc5\u987b\u4e3a\u5206\u949f\u7ebf\uff1b\u5982\u679c`to_frame`\u4e3a\u5468\u4ee5\u4e0a\u7ea7\u522b\u7ebf\uff0c\u5219`from_frame`\u5fc5\u987b\u4e3a\u65e5\u7ebf\u3002\u5176\u5b83\u7ea7\u522b\u4e4b\u95f4\u7684\u8f6c\u6362\u4e0d\u652f\u6301\u3002 \u5982\u679c`from_frame`\u4e3a1\u5206\u949f\u7ebf\uff0c\u5219\u5fc5\u987b\u4ece9\uff1a31\u8d77\u3002 Args: bars (BarsArray): \u884c\u60c5\u6570\u636e from_frame (FrameType): \u8f6c\u6362\u524d\u7684FrameType to_frame (FrameType): \u8f6c\u6362\u540e\u7684FrameType Returns: BarsArray: \u8f6c\u6362\u540e\u7684\u884c\u60c5\u6570\u636e \"\"\" if from_frame == FrameType . MIN1 : return cls . _resample_from_min1 ( bars , to_frame ) elif from_frame == FrameType . DAY : # pragma: no cover return cls . _resample_from_day ( bars , to_frame ) else : # pragma: no cover raise TypeError ( f \"unsupported from_frame: { from_frame } \" ) @classmethod def _measurement_name ( cls , frame_type ): return f \"stock_bars_ { frame_type . value } \" @classmethod def _resample_from_min1 ( cls , bars : BarsArray , to_frame : FrameType ) -> BarsArray : \"\"\"\u5c06`bars`\u4ece1\u5206\u949f\u7ebf\u8f6c\u6362\u4e3a`to_frame`\u7684\u884c\u60c5\u6570\u636e \u91cd\u91c7\u6837\u540e\u7684\u6570\u636e\u53ea\u5305\u542bframe, open, high, low, close, volume, amount, factor\uff0c\u65e0\u8bba\u4f20\u5165\u6570\u636e\u662f\u5426\u8fd8\u6709\u522b\u7684\u5b57\u6bb5\uff0c\u5b83\u4eec\u90fd\u5c06\u88ab\u4e22\u5f03\u3002 resampling 240\u6839\u5206\u949f\u7ebf\u52305\u5206\u949f\u5927\u7ea6\u9700\u8981100\u5fae\u79d2\u3002 TODO\uff1a \u5982\u679c`bars`\u4e2d\u5305\u542bnan\u600e\u4e48\u5904\u7406\uff1f \"\"\" if bars [ 0 ][ \"frame\" ] . item () . minute != 31 : raise ValueError ( \"resampling from 1min must start from 9:31\" ) if to_frame not in ( FrameType . MIN5 , FrameType . MIN15 , FrameType . MIN30 , FrameType . MIN60 , FrameType . DAY , ): raise ValueError ( f \"unsupported to_frame: { to_frame } \" ) bins_len = { FrameType . MIN5 : 5 , FrameType . MIN15 : 15 , FrameType . MIN30 : 30 , FrameType . MIN60 : 60 , FrameType . DAY : 240 , }[ to_frame ] bins = len ( bars ) // bins_len npart1 = bins * bins_len part1 = bars [: npart1 ] . reshape (( - 1 , bins_len )) part2 = bars [ npart1 :] open_pos = np . arange ( bins ) * bins_len close_pos = np . arange ( 1 , bins + 1 ) * bins_len - 1 if len ( bars ) > bins_len * bins : close_pos = np . append ( close_pos , len ( bars ) - 1 ) resampled = np . empty (( bins + 1 ,), dtype = bars_dtype ) else : resampled = np . empty (( bins ,), dtype = bars_dtype ) resampled [: bins ][ \"open\" ] = bars [ open_pos ][ \"open\" ] resampled [: bins ][ \"high\" ] = np . max ( part1 [ \"high\" ], axis = 1 ) resampled [: bins ][ \"low\" ] = np . min ( part1 [ \"low\" ], axis = 1 ) resampled [: bins ][ \"volume\" ] = np . sum ( part1 [ \"volume\" ], axis = 1 ) resampled [: bins ][ \"amount\" ] = np . sum ( part1 [ \"amount\" ], axis = 1 ) if len ( part2 ): resampled [ - 1 ][ \"open\" ] = part2 [ \"open\" ][ 0 ] resampled [ - 1 ][ \"high\" ] = np . max ( part2 [ \"high\" ]) resampled [ - 1 ][ \"low\" ] = np . 
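`_resample_from_min1` aggregates 1-minute bars by reshaping the array into `(bins, bins_len)` rows and taking first/max/min/sum per row, with any leftover tail handled as one extra bin. The sketch below reproduces just the reshape-based aggregation on synthetic data and assumes the input length is an exact multiple of `bins_len`; the real method also carries `frame`, `close` and `factor` from the last bar of each bin and handles the partial tail:

```
import numpy as np

# Reduced field set for illustration only.
DT = np.dtype([("open", "f4"), ("high", "f4"), ("low", "f4"), ("close", "f4"), ("volume", "f8")])

def resample_min1(bars: np.ndarray, bins_len: int = 5) -> np.ndarray:
    bins = len(bars) // bins_len
    part1 = bars[: bins * bins_len].reshape((-1, bins_len))
    open_pos = np.arange(bins) * bins_len            # first 1-min bar of each bin
    close_pos = np.arange(1, bins + 1) * bins_len - 1  # last 1-min bar of each bin

    out = np.empty((bins,), dtype=DT)
    out["open"] = bars[open_pos]["open"]
    out["high"] = np.max(part1["high"], axis=1)
    out["low"] = np.min(part1["low"], axis=1)
    out["close"] = bars[close_pos]["close"]
    out["volume"] = np.sum(part1["volume"], axis=1)
    return out

bars = np.zeros(10, dtype=DT)
bars["open"] = np.arange(10, dtype="f4")
bars["high"] = bars["open"] + 0.5
bars["low"] = bars["open"] - 0.5
bars["close"] = bars["open"] + 0.2
bars["volume"] = 1000
print(resample_min1(bars, bins_len=5))  # two 5-minute bars
```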
min ( part2 [ \"low\" ]) resampled [ - 1 ][ \"volume\" ] = np . sum ( part2 [ \"volume\" ]) resampled [ - 1 ][ \"amount\" ] = np . sum ( part2 [ \"amount\" ]) cols = [ \"frame\" , \"close\" , \"factor\" ] resampled [ cols ] = bars [ close_pos ][ cols ] if to_frame == FrameType . DAY : resampled [ \"frame\" ] = bars [ - 1 ][ \"frame\" ] . item () . date () return resampled @classmethod def _resample_from_day ( cls , bars : BarsArray , to_frame : FrameType ) -> BarsArray : \"\"\"\u5c06`bars`\u4ece\u65e5\u7ebf\u8f6c\u6362\u6210`to_frame`\u7684\u884c\u60c5\u6570\u636e Args: bars (BarsArray): [description] to_frame (FrameType): [description] Returns: \u8f6c\u6362\u540e\u7684\u884c\u60c5\u6570\u636e \"\"\" rules = { \"frame\" : \"last\" , \"open\" : \"first\" , \"high\" : \"max\" , \"low\" : \"min\" , \"close\" : \"last\" , \"volume\" : \"sum\" , \"amount\" : \"sum\" , \"factor\" : \"last\" , } if to_frame == FrameType . WEEK : freq = \"W-Fri\" elif to_frame == FrameType . MONTH : freq = \"M\" elif to_frame == FrameType . QUARTER : freq = \"Q\" elif to_frame == FrameType . YEAR : freq = \"A\" else : raise ValueError ( f \"unsupported to_frame: { to_frame } \" ) df = pd . DataFrame ( bars ) df . index = pd . to_datetime ( bars [ \"frame\" ]) df = df . resample ( freq ) . agg ( rules ) bars = np . array ( df . to_records ( index = False ), dtype = bars_dtype ) # filter out data like (None, nan, ...) return bars [ np . isfinite ( bars [ \"close\" ])] @classmethod async def _get_price_limit_in_cache ( cls , code : str , begin : datetime . date , end : datetime . date ): date_str = await cache . _security_ . get ( TRADE_PRICE_LIMITS_DATE ) if date_str : date_in_cache = arrow . get ( date_str ) . date () if date_in_cache < begin or date_in_cache > end : return None else : return None dtype = [( \"frame\" , \"O\" ), ( \"high_limit\" , \"f4\" ), ( \"low_limit\" , \"f4\" )] hp = await cache . _security_ . hget ( TRADE_PRICE_LIMITS , f \" { code } .high_limit\" ) lp = await cache . _security_ . hget ( TRADE_PRICE_LIMITS , f \" { code } .low_limit\" ) if hp is None or lp is None : return None else : return np . array ([( date_in_cache , hp , lp )], dtype = dtype ) @classmethod async def get_trade_price_limits ( cls , code : str , begin : Frame , end : Frame ) -> BarsArray : \"\"\"\u4eceinfluxdb\u548ccache\u4e2d\u83b7\u53d6\u4e2a\u80a1\u5728[begin, end]\u4e4b\u95f4\u7684\u6da8\u8dcc\u505c\u4ef7\u3002 \u6da8\u8dcc\u505c\u4ef7\u53ea\u6709\u65e5\u7ebf\u6570\u636e\u624d\u6709\uff0c\u56e0\u6b64\uff0cFrameType\u56fa\u5b9a\u4e3aFrameType.DAY\uff0c \u5f53\u5929\u7684\u6570\u636e\u5b58\u653e\u4e8eredis\uff0c\u5982\u679c\u67e5\u8be2\u65e5\u671f\u5305\u542b\u5f53\u5929\uff08\u4ea4\u6613\u65e5\uff09\uff0c\u4ececache\u4e2d\u8bfb\u53d6\u5e76\u8ffd\u52a0\u5230\u7ed3\u679c\u4e2d Args: code : \u4e2a\u80a1\u4ee3\u7801 begin : \u5f00\u59cb\u65e5\u671f end : \u7ed3\u675f\u65e5\u671f Returns: dtype\u4e3a[('frame', 'O'), ('high_limit', 'f4'), ('low_limit', 'f4')]\u7684numpy\u6570\u7ec4 \"\"\" cols = [ \"_time\" , \"high_limit\" , \"low_limit\" ] dtype = [( \"frame\" , \"O\" ), ( \"high_limit\" , \"f4\" ), ( \"low_limit\" , \"f4\" )] if isinstance ( begin , datetime . datetime ): begin = begin . date () # \u5f3a\u5236\u8f6c\u6362\u4e3adate if isinstance ( end , datetime . datetime ): end = end . date () # \u5f3a\u5236\u8f6c\u6362\u4e3adate data_in_cache = await cls . _get_price_limit_in_cache ( code , begin , end ) client = get_influx_client () measurement = cls . _measurement_name ( FrameType . DAY ) flux = ( Flux () . 
bucket ( client . _bucket ) . measurement ( measurement ) . range ( begin , end ) . tags ({ \"code\" : code }) . fields ( cols ) . sort ( \"_time\" ) ) ds = NumpyDeserializer ( dtype , use_cols = cols , converters = { \"_time\" : lambda x : ciso8601 . parse_datetime ( x ) . date ()}, # since we ask parse date in convertors, so we have to disable parse_date parse_date = None , ) result = await client . query ( flux , ds ) if data_in_cache : result = np . concatenate ([ result , data_in_cache ]) return result @classmethod async def reset_price_limits_cache ( cls , cache_only : bool , dt : datetime . date = None ): if cache_only is False : date_str = await cache . _security_ . get ( TRADE_PRICE_LIMITS_DATE ) if not date_str : return # skip clear action if date not found in cache date_in_cache = arrow . get ( date_str ) . date () if dt is None or date_in_cache != dt : # \u66f4\u65b0\u7684\u65f6\u95f4\u548ccache\u7684\u65f6\u95f4\u76f8\u540c\uff0c\u5219\u6e05\u9664cache return # skip clear action await cache . _security_ . delete ( TRADE_PRICE_LIMITS ) await cache . _security_ . delete ( TRADE_PRICE_LIMITS_DATE ) @classmethod async def save_trade_price_limits ( cls , price_limits : LimitPriceOnlyBarsArray , to_cache : bool ): \"\"\"\u4fdd\u5b58\u6da8\u8dcc\u505c\u4ef7 Args: price_limits: \u8981\u4fdd\u5b58\u7684\u6da8\u8dcc\u505c\u4ef7\u683c\u6570\u636e\u3002 to_cache: \u662f\u4fdd\u5b58\u5230\u7f13\u5b58\u4e2d\uff0c\u8fd8\u662f\u4fdd\u5b58\u5230\u6301\u4e45\u5316\u5b58\u50a8\u4e2d \"\"\" if len ( price_limits ) == 0 : return if to_cache : # \u6bcf\u4e2a\u4ea4\u6613\u65e5\u4e0a\u53489\u70b9\u66f4\u65b0\u4e24\u6b21 pl = cache . _security_ . pipeline () for row in price_limits : # .item convert np.float64 to python float pl . hset ( TRADE_PRICE_LIMITS , f \" { row [ 'code' ] } .high_limit\" , row [ \"high_limit\" ] . item (), ) pl . hset ( TRADE_PRICE_LIMITS , f \" { row [ 'code' ] } .low_limit\" , row [ \"low_limit\" ] . item (), ) dt = price_limits [ - 1 ][ \"frame\" ] pl . set ( TRADE_PRICE_LIMITS_DATE , dt . strftime ( \"%Y-%m- %d \" )) await pl . execute () else : # to influxdb\uff0c \u6bcf\u4e2a\u4ea4\u6613\u65e5\u7684\u7b2c\u4e8c\u5929\u65e9\u4e0a2\u70b9\u4fdd\u5b58 client = get_influx_client () await client . save ( price_limits , cls . _measurement_name ( FrameType . DAY ), tag_keys = \"code\" , time_key = \"frame\" , ) @classmethod async def trade_price_limit_flags ( cls , code : str , start : datetime . date , end : datetime . date ) -> Tuple [ List [ bool ]]: \"\"\"\u83b7\u53d6\u4e2a\u80a1\u5728[start, end]\u4e4b\u95f4\u7684\u6da8\u8dcc\u505c\u6807\u5fd7 !!!Note \u672c\u51fd\u6570\u8fd4\u56de\u7684\u5e8f\u5217\u5728\u80a1\u7968\u6709\u505c\u724c\u7684\u60c5\u51b5\u4e0b\uff0c\u5c06\u4e0d\u80fd\u4e0e[start, end]\u4e00\u4e00\u5bf9\u5e94\u3002 Args: code: \u4e2a\u80a1\u4ee3\u7801 start: \u5f00\u59cb\u65e5\u671f end: \u7ed3\u675f\u65e5\u671f Returns: \u6da8\u8dcc\u505c\u6807\u5fd7\u5217\u8868(buy, sell) \"\"\" cols = [ \"_time\" , \"close\" , \"high_limit\" , \"low_limit\" ] client = get_influx_client () measurement = cls . _measurement_name ( FrameType . DAY ) flux = ( Flux () . bucket ( client . _bucket ) . measurement ( measurement ) . range ( start , end ) . tags ({ \"code\" : code }) . fields ( cols ) . 
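`get_trade_price_limits` combines the persisted daily limit prices from InfluxDB with today's values cached in Redis. A hedged usage sketch; it assumes the usual `omicron.init()`/`omicron.close()` entry points and a running Omega/Redis/InfluxDB environment with synced data:

```
import asyncio
import datetime

import omicron
from omicron.models.stock import Stock

async def main():
    await omicron.init()  # assumes a configured, running Omega/Redis/InfluxDB stack
    limits = await Stock.get_trade_price_limits(
        "000001.XSHE", datetime.date(2023, 1, 3), datetime.date(2023, 1, 6)
    )
    for rec in limits:
        print(rec["frame"], rec["high_limit"], rec["low_limit"])
    await omicron.close()

asyncio.run(main())
```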
sort ( \"_time\" ) ) dtype = [ ( \"frame\" , \"O\" ), ( \"close\" , \"f4\" ), ( \"high_limit\" , \"f4\" ), ( \"low_limit\" , \"f4\" ), ] ds = NumpyDeserializer ( dtype , use_cols = [ \"_time\" , \"close\" , \"high_limit\" , \"low_limit\" ], converters = { \"_time\" : lambda x : ciso8601 . parse_datetime ( x ) . date ()}, # since we ask parse date in convertors, so we have to disable parse_date parse_date = None , ) result = await client . query ( flux , ds ) if result . size == 0 : return np . array ([], dtype = dtype ) return ( array_price_equal ( result [ \"close\" ], result [ \"high_limit\" ]), array_price_equal ( result [ \"close\" ], result [ \"low_limit\" ]), ) @classmethod async def trade_price_limit_flags_ex ( cls , code : str , start : datetime . date , end : datetime . date ) -> Dict [ datetime . date , Tuple [ bool , bool ]]: \"\"\"\u83b7\u53d6\u80a1\u7968`code`\u5728`[start, end]`\u533a\u95f4\u7684\u6da8\u8dcc\u505c\u6807\u5fd7 !!!Note \u5982\u679cend\u4e3a\u5f53\u5929\uff0c\u6ce8\u610f\u5728\u672a\u6536\u76d8\u4e4b\u524d\uff0c\u8fd9\u4e2a\u6da8\u8dcc\u505c\u6807\u5fd7\u90fd\u662f\u4e0d\u7a33\u5b9a\u7684 Args: code: \u80a1\u7968\u4ee3\u7801 start: \u8d77\u59cb\u65e5\u671f end: \u7ed3\u675f\u65e5\u671f Returns: \u4ee5\u65e5\u671f\u4e3akey\uff0c\uff08\u6da8\u505c\uff0c\u8dcc\u505c\uff09\u4e3a\u503c\u7684dict \"\"\" limit_prices = await cls . get_trade_price_limits ( code , start , end ) bars = await Stock . get_bars_in_range ( code , FrameType . DAY , start = start , end = end , fq = False ) close = bars [ \"close\" ] results = {} # aligned = True for i in range ( len ( bars )): if bars [ i ][ \"frame\" ] . item () . date () != limit_prices [ i ][ \"frame\" ]: # aligned = False logger . warning ( \"\u6570\u636e\u540c\u6b65\u9519\u8bef\uff0c\u6da8\u8dcc\u505c\u4ef7\u683c\u4e0e\u6536\u76d8\u4ef7\u65f6\u95f4\u4e0d\u4e00\u81f4: %s , %s \" , code , bars [ i ][ \"frame\" ]) break results [ limit_prices [ i ][ \"frame\" ]] = ( price_equal ( limit_prices [ i ][ \"high_limit\" ], close [ i ]), price_equal ( limit_prices [ i ][ \"low_limit\" ], close [ i ]), ) # if not aligned: # bars = bars[i:] # limit_prices = limit_prices[i:] # for frame in bars[\"frame\"]: # frame = frame.item().date() # close = bars[bars[\"frame\"].item().date() == frame][\"close\"].item() # high = limit_prices[limit_prices[\"frame\"] == frame][\"high_limit\"].item() # low = limit_prices[limit_prices[\"frame\"] == frame][\"low_limit\"].item() # results[frame] = ( # price_equal(high, close), # price_equal(low, close) # ) return results @classmethod async def get_latest_price ( cls , codes : Iterable [ str ]) -> List [ str ]: \"\"\"\u83b7\u53d6\u591a\u652f\u80a1\u7968\u7684\u6700\u65b0\u4ef7\u683c\uff08\u4ea4\u6613\u65e5\u5f53\u5929\uff09\uff0c\u6682\u4e0d\u5305\u62ec\u6307\u6570 \u4ef7\u683c\u6570\u636e\u6bcf5\u79d2\u66f4\u65b0\u4e00\u6b21\uff0c\u63a5\u53d7\u591a\u53ea\u80a1\u7968\u67e5\u8be2\uff0c\u8fd4\u56de\u6700\u540e\u7f13\u5b58\u7684\u4ef7\u683c Args: codes: \u4ee3\u7801\u5217\u8868 Returns: \u8fd4\u56de\u4e00\u4e2aList\uff0c\u4ef7\u683c\u662f\u5b57\u7b26\u5f62\u5f0f\u7684\u6d6e\u70b9\u6570\u3002 \"\"\" if not codes : return [] _raw_code_list = [] for code_str in codes : code , _ = code_str . split ( \".\" ) _raw_code_list . append ( code ) _converted_data = [] raw_data = await cache . feature . hmget ( TRADE_LATEST_PRICE , * _raw_code_list ) for _data in raw_data : if _data is None : _converted_data . append ( _data ) else : _converted_data . 
append ( float ( _data )) return _converted_data security_type : SecurityType property readonly \u00b6 \u8fd4\u56de\u8bc1\u5238\u7c7b\u578b Returns: Type Description SecurityType [description] batch_cache_bars ( frame_type , bars ) async classmethod \u00b6 \u7f13\u5b58\u5df2\u6536\u76d8\u7684\u5206\u949f\u7ebf\u548c\u65e5\u7ebf \u5f53\u7f13\u5b58\u65e5\u7ebf\u65f6\uff0c\u4ec5\u9650\u4e8e\u5f53\u65e5\u6536\u76d8\u540e\u7684\u7b2c\u4e00\u6b21\u540c\u6b65\u65f6\u8c03\u7528\u3002 Parameters: Name Type Description Default frame_type FrameType \u5e27\u7c7b\u578b required bars Dict[str, numpy.ndarray[Any, numpy.dtype[dtype([('frame', ' Generator [ Dict [ str , BarsArray ], None , None ]: \"\"\"\u83b7\u53d6\u591a\u652f\u80a1\u7968\uff08\u6307\u6570\uff09\u5728[start, end)\u65f6\u95f4\u6bb5\u5185\u7684\u884c\u60c5\u6570\u636e \u5982\u679c\u8981\u83b7\u53d6\u7684\u884c\u60c5\u6570\u636e\u662f\u65e5\u7ebf\u7ea7\u522b\uff08\u53731d, 1w, 1M)\uff0c\u4f7f\u7528\u672c\u63a5\u53e3\u3002 \u505c\u724c\u6570\u636e\u5904\u7406\u8bf7\u89c1[get_bars][omicron.models.stock.Stock.get_bars]\u3002 \u672c\u51fd\u6570\u8fd4\u56de\u4e00\u4e2a\u8fed\u4ee3\u5668\uff0c\u4f7f\u7528\u65b9\u6cd5\u793a\u4f8b\uff1a ``` async for code, bars in Stock.batch_get_day_level_bars_in_range(...): print(code, bars) ``` \u5982\u679c`end`\u4e0d\u5728`frame_type`\u6240\u5c5e\u7684\u8fb9\u754c\u70b9\u4e0a\uff0c\u90a3\u4e48\uff0c\u5982\u679c`end`\u5927\u4e8e\u7b49\u4e8e\u5f53\u524d\u7f13\u5b58\u672a\u6536\u76d8\u6570\u636e\u65f6\u95f4\uff0c\u5219\u5c06\u5305\u542b\u672a\u6536\u76d8\u6570\u636e\uff1b\u5426\u5219\uff0c\u8fd4\u56de\u7684\u8bb0\u5f55\u5c06\u622a\u6b62\u5230`tf.floor(end, frame_type)`\u3002 Args: codes: \u4ee3\u7801\u5217\u8868 frame_type: \u5e27\u7c7b\u578b start: \u8d77\u59cb\u65f6\u95f4 end: \u7ed3\u675f\u65f6\u95f4 fq: \u662f\u5426\u8fdb\u884c\u590d\u6743\uff0c\u5982\u679c\u662f\uff0c\u5219\u8fdb\u884c\u524d\u590d\u6743\u3002Defaults to True. Returns: Generator[Dict[str, BarsArray], None, None]: \u8fed\u4ee3\u5668\uff0c\u6bcf\u6b21\u8fd4\u56de\u4e00\u4e2a\u5b57\u5178\uff0c\u5176\u4e2dkey\u4e3a\u4ee3\u7801\uff0cvalue\u4e3a\u884c\u60c5\u6570\u636e \"\"\" today = datetime . datetime . now () . date () # \u65e5\u7ebf\uff0cend\u4e0d\u7b49\u4e8e\u6700\u540e\u4ea4\u6613\u65e5\uff0c\u6b64\u65f6\u5df2\u65e0\u7f13\u5b58 if frame_type == FrameType . DAY and end == tf . floor ( today , frame_type ): from_cache = True elif frame_type != FrameType . DAY and start > tf . floor ( today , frame_type ): from_cache = True else : from_cache = False n = tf . count_frames ( start , end , frame_type ) max_query_size = min ( cfg . influxdb . max_query_size , INFLUXDB_MAX_QUERY_SIZE ) batch_size = max ( max_query_size // n , 1 ) for i in range ( 0 , len ( codes ), batch_size ): batch_codes = codes [ i : i + batch_size ] persisted = await cls . _batch_get_persisted_bars_in_range ( batch_codes , frame_type , start , end ) if from_cache : cached = await cls . _batch_get_cached_bars_n ( frame_type , 1 , end , batch_codes ) cached = pd . DataFrame ( cached , columns = bars_dtype_with_code . names ) df = pd . concat ([ persisted , cached ]) else : df = persisted for code in batch_codes : filtered = df [ df [ \"code\" ] == code ][ bars_cols ] bars = filtered . to_records ( index = False ) . astype ( bars_dtype ) if fq : bars = cls . 
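`batch_get_day_level_bars_in_range` is an async generator: it splits the code list into batches of `max_query_size // n` codes per InfluxDB query, optionally appends the cached unclosed day bar, and yields `(code, bars)` pairs. A usage sketch following the pattern shown in its docstring; it assumes `FrameType` is importable from `coretypes` and the environment has been initialized:

```
import asyncio
import datetime

import omicron
from coretypes import FrameType
from omicron.models.stock import Stock

async def main():
    await omicron.init()  # assumes Omega has synced daily bars
    codes = ["000001.XSHE", "600000.XSHG"]
    start, end = datetime.date(2023, 1, 3), datetime.date(2023, 1, 31)
    async for code, bars in Stock.batch_get_day_level_bars_in_range(
        codes, FrameType.DAY, start, end
    ):
        print(code, len(bars), bars[-1]["close"] if len(bars) else None)
    await omicron.close()

asyncio.run(main())
```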
qfq ( bars ) yield code , bars batch_get_min_level_bars_in_range ( codes , frame_type , start , end , fq = True ) classmethod \u00b6 \u83b7\u53d6\u591a\u652f\u80a1\u7968\uff08\u6307\u6570\uff09\u5728[start, end)\u65f6\u95f4\u6bb5\u5185\u7684\u884c\u60c5\u6570\u636e \u5982\u679c\u8981\u83b7\u53d6\u7684\u884c\u60c5\u6570\u636e\u662f\u5206\u949f\u7ea7\u522b\uff08\u53731m, 5m, 15m, 30m\u548c60m)\uff0c\u4f7f\u7528\u672c\u63a5\u53e3\u3002 \u505c\u724c\u6570\u636e\u5904\u7406\u8bf7\u89c1 get_bars \u3002 \u672c\u51fd\u6570\u8fd4\u56de\u4e00\u4e2a\u8fed\u4ee3\u5668\uff0c\u4f7f\u7528\u65b9\u6cd5\u793a\u4f8b\uff1a 1 2 async for code, bars in Stock.batch_get_min_level_bars_in_range(...): print(code, bars) \u5982\u679c end \u4e0d\u5728 frame_type \u6240\u5c5e\u7684\u8fb9\u754c\u70b9\u4e0a\uff0c\u90a3\u4e48\uff0c\u5982\u679c end \u5927\u4e8e\u7b49\u4e8e\u5f53\u524d\u7f13\u5b58\u672a\u6536\u76d8\u6570\u636e\u65f6\u95f4\uff0c\u5219\u5c06\u5305\u542b\u672a\u6536\u76d8\u6570\u636e\uff1b\u5426\u5219\uff0c\u8fd4\u56de\u7684\u8bb0\u5f55\u5c06\u622a\u6b62\u5230 tf.floor(end, frame_type) \u3002 Parameters: Name Type Description Default codes List[str] \u80a1\u7968/\u6307\u6570\u4ee3\u7801\u5217\u8868 required frame_type FrameType \u5e27\u7c7b\u578b required start Union[datetime.date, datetime.datetime] \u8d77\u59cb\u65f6\u95f4 required end Union[datetime.date, datetime.datetime] \u7ed3\u675f\u65f6\u95f4\u3002\u5982\u679c\u672a\u6307\u660e\uff0c\u5219\u53d6\u5f53\u524d\u65f6\u95f4\u3002 required fq bool \u662f\u5426\u8fdb\u884c\u590d\u6743\uff0c\u5982\u679c\u662f\uff0c\u5219\u8fdb\u884c\u524d\u590d\u6743\u3002Defaults to True. True Returns: Type Description Generator[Dict[str, BarsArray], None, None] \u8fed\u4ee3\u5668\uff0c\u6bcf\u6b21\u8fd4\u56de\u4e00\u4e2a\u5b57\u5178\uff0c\u5176\u4e2dkey\u4e3a\u4ee3\u7801\uff0cvalue\u4e3a\u884c\u60c5\u6570\u636e Source code in omicron/models/stock.py @classmethod async def batch_get_min_level_bars_in_range ( cls , codes : List [ str ], frame_type : FrameType , start : Frame , end : Frame , fq : bool = True , ) -> Generator [ Dict [ str , BarsArray ], None , None ]: \"\"\"\u83b7\u53d6\u591a\u652f\u80a1\u7968\uff08\u6307\u6570\uff09\u5728[start, end)\u65f6\u95f4\u6bb5\u5185\u7684\u884c\u60c5\u6570\u636e \u5982\u679c\u8981\u83b7\u53d6\u7684\u884c\u60c5\u6570\u636e\u662f\u5206\u949f\u7ea7\u522b\uff08\u53731m, 5m, 15m, 30m\u548c60m)\uff0c\u4f7f\u7528\u672c\u63a5\u53e3\u3002 \u505c\u724c\u6570\u636e\u5904\u7406\u8bf7\u89c1[get_bars][omicron.models.stock.Stock.get_bars]\u3002 \u672c\u51fd\u6570\u8fd4\u56de\u4e00\u4e2a\u8fed\u4ee3\u5668\uff0c\u4f7f\u7528\u65b9\u6cd5\u793a\u4f8b\uff1a ``` async for code, bars in Stock.batch_get_min_level_bars_in_range(...): print(code, bars) ``` \u5982\u679c`end`\u4e0d\u5728`frame_type`\u6240\u5c5e\u7684\u8fb9\u754c\u70b9\u4e0a\uff0c\u90a3\u4e48\uff0c\u5982\u679c`end`\u5927\u4e8e\u7b49\u4e8e\u5f53\u524d\u7f13\u5b58\u672a\u6536\u76d8\u6570\u636e\u65f6\u95f4\uff0c\u5219\u5c06\u5305\u542b\u672a\u6536\u76d8\u6570\u636e\uff1b\u5426\u5219\uff0c\u8fd4\u56de\u7684\u8bb0\u5f55\u5c06\u622a\u6b62\u5230`tf.floor(end, frame_type)`\u3002 Args: codes: \u80a1\u7968/\u6307\u6570\u4ee3\u7801\u5217\u8868 frame_type: \u5e27\u7c7b\u578b start: \u8d77\u59cb\u65f6\u95f4 end: \u7ed3\u675f\u65f6\u95f4\u3002\u5982\u679c\u672a\u6307\u660e\uff0c\u5219\u53d6\u5f53\u524d\u65f6\u95f4\u3002 fq: \u662f\u5426\u8fdb\u884c\u590d\u6743\uff0c\u5982\u679c\u662f\uff0c\u5219\u8fdb\u884c\u524d\u590d\u6743\u3002Defaults to True. 
Returns: Generator[Dict[str, BarsArray], None, None]: \u8fed\u4ee3\u5668\uff0c\u6bcf\u6b21\u8fd4\u56de\u4e00\u4e2a\u5b57\u5178\uff0c\u5176\u4e2dkey\u4e3a\u4ee3\u7801\uff0cvalue\u4e3a\u884c\u60c5\u6570\u636e \"\"\" closed_end = tf . floor ( end , frame_type ) n = tf . count_frames ( start , closed_end , frame_type ) max_query_size = min ( cfg . influxdb . max_query_size , INFLUXDB_MAX_QUERY_SIZE ) batch_size = max ( 1 , max_query_size // n ) ff = tf . first_min_frame ( datetime . datetime . now (), frame_type ) for i in range ( 0 , len ( codes ), batch_size ): batch_codes = codes [ i : i + batch_size ] if end < ff : part1 = await cls . _batch_get_persisted_bars_in_range ( batch_codes , frame_type , start , end ) part2 = pd . DataFrame ([], columns = bars_dtype_with_code . names ) elif start >= ff : part1 = pd . DataFrame ([], columns = bars_dtype_with_code . names ) n = tf . count_frames ( start , closed_end , frame_type ) + 1 cached = await cls . _batch_get_cached_bars_n ( frame_type , n , end , batch_codes ) cached = cached [ cached [ \"frame\" ] >= start ] part2 = pd . DataFrame ( cached , columns = bars_dtype_with_code . names ) else : part1 = await cls . _batch_get_persisted_bars_in_range ( batch_codes , frame_type , start , ff ) n = tf . count_frames ( start , closed_end , frame_type ) + 1 cached = await cls . _batch_get_cached_bars_n ( frame_type , n , end , batch_codes ) part2 = pd . DataFrame ( cached , columns = bars_dtype_with_code . names ) df = pd . concat ([ part1 , part2 ]) for code in batch_codes : filtered = df [ df [ \"code\" ] == code ][ bars_cols ] bars = filtered . to_records ( index = False ) . astype ( bars_dtype ) if fq : bars = cls . qfq ( bars ) yield code , bars cache_bars ( code , frame_type , bars ) async classmethod \u00b6 \u5c06\u5f53\u671f\u5df2\u6536\u76d8\u7684\u884c\u60c5\u6570\u636e\u7f13\u5b58 Note \u5f53\u524d\u53ea\u7f13\u5b581\u5206\u949f\u6570\u636e\u3002\u5176\u5b83\u5206\u949f\u6570\u636e\uff0c\u90fd\u5728\u8c03\u7528\u65f6\uff0c\u901a\u8fc7resample\u4e34\u65f6\u5408\u6210\u3002 \u884c\u60c5\u6570\u636e\u7f13\u5b58\u5728\u4ee5 bars:{frame_type.value}:{code} \u4e3akey, {frame}\u4e3afield\u7684hashmap\u4e2d\u3002 Parameters: Name Type Description Default code str the full qualified code of a security or index required frame_type FrameType frame type of the bars required bars numpy.ndarray[Any, numpy.dtype[dtype([('frame', ' int : \"\"\"\u83b7\u53d6\u4e0a\u5e02\u4ee5\u6765\u7ecf\u8fc7\u4e86\u591a\u5c11\u4e2a\u4ea4\u6613\u65e5 \u7531\u4e8e\u53d7\u4ea4\u6613\u65e5\u5386\u9650\u5236\uff082005\u5e741\u67084\u65e5\u4e4b\u524d\u7684\u4ea4\u6613\u65e5\u5386\u6ca1\u6709\uff09\uff0c\u5bf9\u4e8e\u5728\u4e4b\u524d\u4e0a\u5e02\u7684\u54c1\u79cd\uff0c\u90fd\u8fd4\u56de\u4ece2005\u5e741\u67084\u65e5\u8d77\u7684\u65e5\u671f\u3002 Returns: int: [description] \"\"\" epoch_start = arrow . get ( \"2005-01-04\" ) . date () ipo_day = self . ipo_date if self . ipo_date > epoch_start else epoch_start return tf . count_day_frames ( ipo_day , arrow . now () . 
date ()) format_code ( code ) staticmethod \u00b6 \u65b0\u4e09\u677f\u548c\u5317\u4ea4\u6240\u7684\u80a1\u7968, \u6682\u4e0d\u652f\u6301, \u9ed8\u8ba4\u8fd4\u56deNone \u4e0a\u8bc1A\u80a1: 600\u3001601\u3001603\u3001605 \u6df1\u8bc1A\u80a1: 000\u3001001 \u4e2d\u5c0f\u677f: 002\u3001003 \u521b\u4e1a\u677f: 300/301 \u79d1\u521b\u677f: 688 \u65b0\u4e09\u677f: 82\u300183\u300187\u300188\u3001430\u3001420\u3001400 \u5317\u4ea4\u6240: 43\u300183\u300187\u300188 Source code in omicron/models/stock.py @staticmethod def format_code ( code ) -> str : \"\"\"\u65b0\u4e09\u677f\u548c\u5317\u4ea4\u6240\u7684\u80a1\u7968, \u6682\u4e0d\u652f\u6301, \u9ed8\u8ba4\u8fd4\u56deNone \u4e0a\u8bc1A\u80a1: 600\u3001601\u3001603\u3001605 \u6df1\u8bc1A\u80a1: 000\u3001001 \u4e2d\u5c0f\u677f: 002\u3001003 \u521b\u4e1a\u677f: 300/301 \u79d1\u521b\u677f: 688 \u65b0\u4e09\u677f: 82\u300183\u300187\u300188\u3001430\u3001420\u3001400 \u5317\u4ea4\u6240: 43\u300183\u300187\u300188 \"\"\" if not code or len ( code ) != 6 : return None prefix = code [ 0 ] if prefix in ( \"0\" , \"3\" ): return f \" { code } .XSHE\" elif prefix == \"6\" : return f \" { code } .XSHG\" else : return None fuzzy_match ( query ) classmethod \u00b6 \u5bf9\u80a1\u7968/\u6307\u6570\u8fdb\u884c\u6a21\u7cca\u5339\u914d\u67e5\u627e query\u53ef\u4ee5\u662f\u80a1\u7968/\u6307\u6570\u4ee3\u7801\uff0c\u4e5f\u53ef\u4ee5\u662f\u5b57\u6bcd\uff08\u6309name\u67e5\u627e\uff09\uff0c\u4e5f\u53ef\u4ee5\u662f\u6c49\u5b57\uff08\u6309\u663e\u793a\u540d\u67e5\u627e\uff09 Parameters: Name Type Description Default query str \u67e5\u8be2\u5b57\u7b26\u4e32 required Returns: Type Description Dict[str, Tuple] \u67e5\u8be2\u7ed3\u679c\uff0c\u5176\u4e2dTuple\u4e3a(code, display_name, name, start, end, type) Source code in omicron/models/stock.py @classmethod def fuzzy_match ( cls , query : str ) -> Dict [ str , Tuple ]: \"\"\"\u5bf9\u80a1\u7968/\u6307\u6570\u8fdb\u884c\u6a21\u7cca\u5339\u914d\u67e5\u627e query\u53ef\u4ee5\u662f\u80a1\u7968/\u6307\u6570\u4ee3\u7801\uff0c\u4e5f\u53ef\u4ee5\u662f\u5b57\u6bcd\uff08\u6309name\u67e5\u627e\uff09\uff0c\u4e5f\u53ef\u4ee5\u662f\u6c49\u5b57\uff08\u6309\u663e\u793a\u540d\u67e5\u627e\uff09 Args: query (str): \u67e5\u8be2\u5b57\u7b26\u4e32 Returns: Dict[str, Tuple]: \u67e5\u8be2\u7ed3\u679c\uff0c\u5176\u4e2dTuple\u4e3a(code, display_name, name, start, end, type) \"\"\" query = query . upper () if re . match ( r \"\\d+\" , query ): return { sec [ \"code\" ]: sec . tolist () for sec in cls . _stocks if sec [ \"code\" ] . startswith ( query ) } elif re . match ( r \"[A-Z]+\" , query ): return { sec [ \"code\" ]: sec . tolist () for sec in cls . _stocks if sec [ \"name\" ] . startswith ( query ) } else : return { sec [ \"code\" ]: sec . tolist () for sec in cls . _stocks if sec [ \"alias\" ] . 
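`format_code` qualifies a bare 6-digit code by its prefix: `0`/`3` map to the Shenzhen suffix `.XSHE`, `6` to the Shanghai suffix `.XSHG`, and anything else (NEEQ/BSE codes) returns `None`. A usage sketch; the call itself is pure string logic, so it only requires that `omicron` is importable:

```
from omicron.models.stock import Stock

print(Stock.format_code("600000"))  # 600000.XSHG  (Shanghai main board)
print(Stock.format_code("000001"))  # 000001.XSHE  (Shenzhen main board)
print(Stock.format_code("300750"))  # 300750.XSHE  (ChiNext)
print(Stock.format_code("830799"))  # None: NEEQ/BSE codes are not supported yet
```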
find ( query ) != - 1 } get_bars ( code , n , frame_type , end = None , fq = True , unclosed = True ) async classmethod \u00b6 \u83b7\u53d6\u5230 end \u4e3a\u6b62\u7684 n \u4e2a\u884c\u60c5\u6570\u636e\u3002 \u8fd4\u56de\u7684\u6570\u636e\u662f\u6309\u7167\u65f6\u95f4\u987a\u5e8f\u9012\u589e\u6392\u5e8f\u7684\u3002\u5728\u9047\u5230\u505c\u724c\u7684\u60c5\u51b5\u65f6\uff0c\u8be5\u65f6\u6bb5\u6570\u636e\u5c06\u88ab\u8df3\u8fc7\uff0c\u56e0\u6b64\u8fd4\u56de\u7684\u8bb0\u5f55\u53ef\u80fd\u4e0d\u662f\u4ea4\u6613\u65e5\u8fde\u7eed\u7684\uff0c\u5e76\u4e14\u53ef\u80fd\u4e0d\u8db3 n \u4e2a\u3002 \u5982\u679c\u7cfb\u7edf\u5f53\u524d\u6ca1\u6709\u5230\u6307\u5b9a\u65f6\u95f4 end \u7684\u6570\u636e\uff0c\u5c06\u5c3d\u6700\u5927\u52aa\u529b\u8fd4\u56de\u6570\u636e\u3002\u8c03\u7528\u8005\u53ef\u4ee5\u901a\u8fc7\u5224\u65ad\u6700\u540e\u4e00\u6761\u6570\u636e\u7684\u65f6\u95f4\u662f\u5426\u7b49\u4e8e end \u6765\u5224\u65ad\u662f\u5426\u83b7\u53d6\u5230\u4e86\u5168\u90e8\u6570\u636e\u3002 Parameters: Name Type Description Default code str \u8bc1\u5238\u4ee3\u7801 required n int \u8bb0\u5f55\u6570 required frame_type FrameType \u5e27\u7c7b\u578b required end Union[datetime.date, datetime.datetime] \u622a\u6b62\u65f6\u95f4,\u5982\u679c\u672a\u6307\u660e\uff0c\u5219\u53d6\u5f53\u524d\u65f6\u95f4 None fq \u662f\u5426\u5bf9\u8fd4\u56de\u8bb0\u5f55\u8fdb\u884c\u590d\u6743\u3002\u5982\u679c\u4e3a True \u7684\u8bdd\uff0c\u5219\u8fdb\u884c\u524d\u590d\u6743\u3002Defaults to True. True unclosed \u662f\u5426\u5305\u542b\u6700\u65b0\u672a\u6536\u76d8\u7684\u6570\u636e\uff1f Defaults to True. True Returns: Type Description numpy.ndarray[Any, numpy.dtype[dtype([('frame', ' BarsArray : \"\"\"\u83b7\u53d6\u5230`end`\u4e3a\u6b62\u7684`n`\u4e2a\u884c\u60c5\u6570\u636e\u3002 \u8fd4\u56de\u7684\u6570\u636e\u662f\u6309\u7167\u65f6\u95f4\u987a\u5e8f\u9012\u589e\u6392\u5e8f\u7684\u3002\u5728\u9047\u5230\u505c\u724c\u7684\u60c5\u51b5\u65f6\uff0c\u8be5\u65f6\u6bb5\u6570\u636e\u5c06\u88ab\u8df3\u8fc7\uff0c\u56e0\u6b64\u8fd4\u56de\u7684\u8bb0\u5f55\u53ef\u80fd\u4e0d\u662f\u4ea4\u6613\u65e5\u8fde\u7eed\u7684\uff0c\u5e76\u4e14\u53ef\u80fd\u4e0d\u8db3`n`\u4e2a\u3002 \u5982\u679c\u7cfb\u7edf\u5f53\u524d\u6ca1\u6709\u5230\u6307\u5b9a\u65f6\u95f4`end`\u7684\u6570\u636e\uff0c\u5c06\u5c3d\u6700\u5927\u52aa\u529b\u8fd4\u56de\u6570\u636e\u3002\u8c03\u7528\u8005\u53ef\u4ee5\u901a\u8fc7\u5224\u65ad\u6700\u540e\u4e00\u6761\u6570\u636e\u7684\u65f6\u95f4\u662f\u5426\u7b49\u4e8e`end`\u6765\u5224\u65ad\u662f\u5426\u83b7\u53d6\u5230\u4e86\u5168\u90e8\u6570\u636e\u3002 Args: code: \u8bc1\u5238\u4ee3\u7801 n: \u8bb0\u5f55\u6570 frame_type: \u5e27\u7c7b\u578b end: \u622a\u6b62\u65f6\u95f4,\u5982\u679c\u672a\u6307\u660e\uff0c\u5219\u53d6\u5f53\u524d\u65f6\u95f4 fq: \u662f\u5426\u5bf9\u8fd4\u56de\u8bb0\u5f55\u8fdb\u884c\u590d\u6743\u3002\u5982\u679c\u4e3a`True`\u7684\u8bdd\uff0c\u5219\u8fdb\u884c\u524d\u590d\u6743\u3002Defaults to True. unclosed: \u662f\u5426\u5305\u542b\u6700\u65b0\u672a\u6536\u76d8\u7684\u6570\u636e\uff1f Defaults to True. Returns: \u8fd4\u56dedtype\u4e3a`coretypes.bars_dtype`\u7684\u4e00\u7ef4numpy\u6570\u7ec4\u3002 \"\"\" now = datetime . datetime . now () try : cached = np . array ([], dtype = bars_dtype ) if frame_type in tf . day_level_frames : if end is None : end = now . date () elif type ( end ) == datetime . datetime : end = end . date () n0 = n if unclosed : cached = await cls . _get_cached_bars_n ( code , 1 , frame_type ) if cached . 
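`get_bars` is the main read API: it merges cached and persisted bars, may include the latest unclosed bar, and forward-adjusts by default. A usage sketch under the same environment assumptions (running Omega, initialized omicron, `FrameType` from `coretypes`):

```
import asyncio

import omicron
from coretypes import FrameType
from omicron.models.stock import Stock

async def main():
    await omicron.init()  # requires a running Omega/Redis/InfluxDB environment
    # last 30 thirty-minute bars for 000001.XSHE, forward adjusted, unclosed bar included
    bars = await Stock.get_bars("000001.XSHE", 30, FrameType.MIN30)
    if len(bars):
        print(len(bars), bars[-1]["frame"], bars[-1]["close"])
    await omicron.close()

asyncio.run(main())
```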
size > 0 : # \u5982\u679c\u7f13\u5b58\u7684\u672a\u6536\u76d8\u65e5\u671f > end\uff0c\u5219\u8be5\u7f13\u5b58\u4e0d\u662f\u9700\u8981\u7684 if cached [ 0 ][ \"frame\" ] . item () . date () > end : cached = np . array ([], dtype = bars_dtype ) else : n0 = n - 1 else : end = end or now closed_frame = tf . floor ( end , frame_type ) # fetch one more bar, in case we should discard unclosed bar cached = await cls . _get_cached_bars_n ( code , n + 1 , frame_type , end ) if not unclosed : cached = cached [ cached [ \"frame\" ] <= closed_frame ] # n bars we need fetch from persisted db n0 = n - cached . size if n0 > 0 : if cached . size > 0 : end0 = cached [ 0 ][ \"frame\" ] . item () else : end0 = end bars = await cls . _get_persisted_bars_n ( code , frame_type , n0 , end0 ) merged = np . concatenate (( bars , cached )) bars = merged [ - n :] else : bars = cached [ - n :] if fq : bars = cls . qfq ( bars ) return bars except Exception as e : logger . exception ( e ) logger . warning ( \"failed to get bars for %s , %s , %s , %s \" , code , n , frame_type , end ) raise get_bars_in_range ( code , frame_type , start , end = None , fq = True , unclosed = True ) async classmethod \u00b6 \u83b7\u53d6\u6307\u5b9a\u8bc1\u5238\uff08 code \uff09\u5728[ start , end ]\u671f\u95f4\u5e27\u7c7b\u578b\u4e3a frame_type \u7684\u884c\u60c5\u6570\u636e\u3002 Parameters: Name Type Description Default code \u8bc1\u5238\u4ee3\u7801 required frame_type \u884c\u60c5\u6570\u636e\u7684\u5e27\u7c7b\u578b required start \u8d77\u59cb\u65f6\u95f4 required end \u7ed3\u675f\u65f6\u95f4,\u5982\u679c\u4e3aNone\uff0c\u5219\u8868\u660e\u53d6\u5230\u5f53\u524d\u65f6\u95f4\u3002 None fq \u662f\u5426\u5bf9\u884c\u60c5\u6570\u636e\u6267\u884c\u524d\u590d\u6743\u64cd\u4f5c True unclosed \u662f\u5426\u5305\u542b\u672a\u6536\u76d8\u7684\u6570\u636e True Source code in omicron/models/stock.py @classmethod async def get_bars_in_range ( cls , code : str , frame_type : FrameType , start : Frame , end : Frame = None , fq = True , unclosed = True , ) -> BarsArray : \"\"\"\u83b7\u53d6\u6307\u5b9a\u8bc1\u5238\uff08`code`\uff09\u5728[`start`, `end`]\u671f\u95f4\u5e27\u7c7b\u578b\u4e3a`frame_type`\u7684\u884c\u60c5\u6570\u636e\u3002 Args: code : \u8bc1\u5238\u4ee3\u7801 frame_type : \u884c\u60c5\u6570\u636e\u7684\u5e27\u7c7b\u578b start : \u8d77\u59cb\u65f6\u95f4 end : \u7ed3\u675f\u65f6\u95f4,\u5982\u679c\u4e3aNone\uff0c\u5219\u8868\u660e\u53d6\u5230\u5f53\u524d\u65f6\u95f4\u3002 fq : \u662f\u5426\u5bf9\u884c\u60c5\u6570\u636e\u6267\u884c\u524d\u590d\u6743\u64cd\u4f5c unclosed : \u662f\u5426\u5305\u542b\u672a\u6536\u76d8\u7684\u6570\u636e \"\"\" now = datetime . datetime . now () if frame_type in tf . day_level_frames : end = end or now . date () if unclosed and tf . day_shift ( end , 0 ) == now . date (): part2 = await cls . _get_cached_bars_n ( code , 1 , frame_type ) else : part2 = np . array ([], dtype = bars_dtype ) # get rest from persisted part1 = await cls . _get_persisted_bars_in_range ( code , frame_type , start , end ) bars = np . concatenate (( part1 , part2 )) else : end = end or now closed_end = tf . floor ( end , frame_type ) ff_min1 = tf . first_min_frame ( now , FrameType . MIN1 ) if tf . day_shift ( end , 0 ) < now . date () or end < ff_min1 : part1 = await cls . _get_persisted_bars_in_range ( code , frame_type , start , end ) part2 = np . array ([], dtype = bars_dtype ) elif start >= ff_min1 : # all in cache part1 = np . array ([], dtype = bars_dtype ) n = tf . 
count_frames ( start , closed_end , frame_type ) + 1 part2 = await cls . _get_cached_bars_n ( code , n , frame_type , end ) part2 = part2 [ part2 [ \"frame\" ] >= start ] else : # in both cache and persisted ff = tf . first_min_frame ( now , frame_type ) part1 = await cls . _get_persisted_bars_in_range ( code , frame_type , start , ff ) n = tf . count_frames ( ff , closed_end , frame_type ) + 1 part2 = await cls . _get_cached_bars_n ( code , n , frame_type , end ) if not unclosed : part2 = part2 [ part2 [ \"frame\" ] <= closed_end ] bars = np . concatenate (( part1 , part2 )) if fq : return cls . qfq ( bars ) else : return bars get_latest_price ( codes ) async classmethod \u00b6 \u83b7\u53d6\u591a\u652f\u80a1\u7968\u7684\u6700\u65b0\u4ef7\u683c\uff08\u4ea4\u6613\u65e5\u5f53\u5929\uff09\uff0c\u6682\u4e0d\u5305\u62ec\u6307\u6570 \u4ef7\u683c\u6570\u636e\u6bcf5\u79d2\u66f4\u65b0\u4e00\u6b21\uff0c\u63a5\u53d7\u591a\u53ea\u80a1\u7968\u67e5\u8be2\uff0c\u8fd4\u56de\u6700\u540e\u7f13\u5b58\u7684\u4ef7\u683c Parameters: Name Type Description Default codes Iterable[str] \u4ee3\u7801\u5217\u8868 required Returns: Type Description List[str] \u8fd4\u56de\u4e00\u4e2aList\uff0c\u4ef7\u683c\u662f\u5b57\u7b26\u5f62\u5f0f\u7684\u6d6e\u70b9\u6570\u3002 Source code in omicron/models/stock.py @classmethod async def get_latest_price ( cls , codes : Iterable [ str ]) -> List [ str ]: \"\"\"\u83b7\u53d6\u591a\u652f\u80a1\u7968\u7684\u6700\u65b0\u4ef7\u683c\uff08\u4ea4\u6613\u65e5\u5f53\u5929\uff09\uff0c\u6682\u4e0d\u5305\u62ec\u6307\u6570 \u4ef7\u683c\u6570\u636e\u6bcf5\u79d2\u66f4\u65b0\u4e00\u6b21\uff0c\u63a5\u53d7\u591a\u53ea\u80a1\u7968\u67e5\u8be2\uff0c\u8fd4\u56de\u6700\u540e\u7f13\u5b58\u7684\u4ef7\u683c Args: codes: \u4ee3\u7801\u5217\u8868 Returns: \u8fd4\u56de\u4e00\u4e2aList\uff0c\u4ef7\u683c\u662f\u5b57\u7b26\u5f62\u5f0f\u7684\u6d6e\u70b9\u6570\u3002 \"\"\" if not codes : return [] _raw_code_list = [] for code_str in codes : code , _ = code_str . split ( \".\" ) _raw_code_list . append ( code ) _converted_data = [] raw_data = await cache . feature . hmget ( TRADE_LATEST_PRICE , * _raw_code_list ) for _data in raw_data : if _data is None : _converted_data . append ( _data ) else : _converted_data . 
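`get_latest_price` reads per-code snapshot prices from the `TRADE_LATEST_PRICE` hash, refreshed roughly every 5 seconds per the docstring. Note that although the annotation says `List[str]`, the implementation above converts each cached value with `float()`, so the result contains floats, with `None` for codes that have no cached quote. A usage sketch under the same environment assumptions:

```
import asyncio

import omicron
from omicron.models.stock import Stock

async def main():
    await omicron.init()  # the price snapshots are maintained by the Omega services
    prices = await Stock.get_latest_price(["000001.XSHE", "600000.XSHG"])
    for code, price in zip(["000001.XSHE", "600000.XSHG"], prices):
        print(code, price)  # price is None if no quote has been cached yet
    await omicron.close()

asyncio.run(main())
```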
append ( float ( _data )) return _converted_data get_trade_price_limits ( code , begin , end ) async classmethod \u00b6 \u4eceinfluxdb\u548ccache\u4e2d\u83b7\u53d6\u4e2a\u80a1\u5728[begin, end]\u4e4b\u95f4\u7684\u6da8\u8dcc\u505c\u4ef7\u3002 \u6da8\u8dcc\u505c\u4ef7\u53ea\u6709\u65e5\u7ebf\u6570\u636e\u624d\u6709\uff0c\u56e0\u6b64\uff0cFrameType\u56fa\u5b9a\u4e3aFrameType.DAY\uff0c \u5f53\u5929\u7684\u6570\u636e\u5b58\u653e\u4e8eredis\uff0c\u5982\u679c\u67e5\u8be2\u65e5\u671f\u5305\u542b\u5f53\u5929\uff08\u4ea4\u6613\u65e5\uff09\uff0c\u4ececache\u4e2d\u8bfb\u53d6\u5e76\u8ffd\u52a0\u5230\u7ed3\u679c\u4e2d Parameters: Name Type Description Default code \u4e2a\u80a1\u4ee3\u7801 required begin \u5f00\u59cb\u65e5\u671f required end \u7ed3\u675f\u65e5\u671f required Returns: Type Description numpy.ndarray[Any, numpy.dtype[dtype([('frame', ' BarsArray : \"\"\"\u4eceinfluxdb\u548ccache\u4e2d\u83b7\u53d6\u4e2a\u80a1\u5728[begin, end]\u4e4b\u95f4\u7684\u6da8\u8dcc\u505c\u4ef7\u3002 \u6da8\u8dcc\u505c\u4ef7\u53ea\u6709\u65e5\u7ebf\u6570\u636e\u624d\u6709\uff0c\u56e0\u6b64\uff0cFrameType\u56fa\u5b9a\u4e3aFrameType.DAY\uff0c \u5f53\u5929\u7684\u6570\u636e\u5b58\u653e\u4e8eredis\uff0c\u5982\u679c\u67e5\u8be2\u65e5\u671f\u5305\u542b\u5f53\u5929\uff08\u4ea4\u6613\u65e5\uff09\uff0c\u4ececache\u4e2d\u8bfb\u53d6\u5e76\u8ffd\u52a0\u5230\u7ed3\u679c\u4e2d Args: code : \u4e2a\u80a1\u4ee3\u7801 begin : \u5f00\u59cb\u65e5\u671f end : \u7ed3\u675f\u65e5\u671f Returns: dtype\u4e3a[('frame', 'O'), ('high_limit', 'f4'), ('low_limit', 'f4')]\u7684numpy\u6570\u7ec4 \"\"\" cols = [ \"_time\" , \"high_limit\" , \"low_limit\" ] dtype = [( \"frame\" , \"O\" ), ( \"high_limit\" , \"f4\" ), ( \"low_limit\" , \"f4\" )] if isinstance ( begin , datetime . datetime ): begin = begin . date () # \u5f3a\u5236\u8f6c\u6362\u4e3adate if isinstance ( end , datetime . datetime ): end = end . date () # \u5f3a\u5236\u8f6c\u6362\u4e3adate data_in_cache = await cls . _get_price_limit_in_cache ( code , begin , end ) client = get_influx_client () measurement = cls . _measurement_name ( FrameType . DAY ) flux = ( Flux () . bucket ( client . _bucket ) . measurement ( measurement ) . range ( begin , end ) . tags ({ \"code\" : code }) . fields ( cols ) . sort ( \"_time\" ) ) ds = NumpyDeserializer ( dtype , use_cols = cols , converters = { \"_time\" : lambda x : ciso8601 . parse_datetime ( x ) . date ()}, # since we ask parse date in convertors, so we have to disable parse_date parse_date = None , ) result = await client . query ( flux , ds ) if data_in_cache : result = np . concatenate ([ result , data_in_cache ]) return result persist_bars ( frame_type , bars ) async classmethod \u00b6 \u5c06\u884c\u60c5\u6570\u636e\u6301\u4e45\u5316 \u5982\u679c bars \u7c7b\u578b\u4e3aDict,\u5219key\u4e3a code \uff0cvalue\u4e3a bars \u3002\u5982\u679c\u5176\u7c7b\u578b\u4e3aBarsArray\u6216\u8005pd.DataFrame\uff0c\u5219 bars \u5404\u5217\u5b57\u6bb5\u5e94\u8be5\u4e3a coretypes.bars_dtype + (\"code\", \"O\")\u6784\u6210\u3002 Parameters: Name Type Description Default frame_type FrameType the frame type of the bars required bars Union[Dict[str, numpy.ndarray[Any, numpy.dtype[dtype([('frame', ' BarsArray : \"\"\"\u5bf9\u884c\u60c5\u6570\u636e\u6267\u884c\u524d\u590d\u6743\u64cd\u4f5c\"\"\" # todo: \u8fd9\u91cc\u53ef\u4ee5\u4f18\u5316 if bars . 
size == 0 : return bars last = bars [ - 1 ][ \"factor\" ] for field in [ \"open\" , \"high\" , \"low\" , \"close\" , \"volume\" ]: bars [ field ] = bars [ field ] * ( bars [ \"factor\" ] / last ) return bars resample ( bars , from_frame , to_frame ) classmethod \u00b6 \u5c06\u539f\u6765\u4e3a from_frame \u7684\u884c\u60c5\u6570\u636e\u8f6c\u6362\u4e3a to_frame \u7684\u884c\u60c5\u6570\u636e \u5982\u679c to_frame \u4e3a\u65e5\u7ebf\u6216\u8005\u5206\u949f\u7ea7\u522b\u7ebf\uff0c\u5219 from_frame \u5fc5\u987b\u4e3a\u5206\u949f\u7ebf\uff1b\u5982\u679c to_frame \u4e3a\u5468\u4ee5\u4e0a\u7ea7\u522b\u7ebf\uff0c\u5219 from_frame \u5fc5\u987b\u4e3a\u65e5\u7ebf\u3002\u5176\u5b83\u7ea7\u522b\u4e4b\u95f4\u7684\u8f6c\u6362\u4e0d\u652f\u6301\u3002 \u5982\u679c from_frame \u4e3a1\u5206\u949f\u7ebf\uff0c\u5219\u5fc5\u987b\u4ece9\uff1a31\u8d77\u3002 Parameters: Name Type Description Default bars BarsArray \u884c\u60c5\u6570\u636e required from_frame FrameType \u8f6c\u6362\u524d\u7684FrameType required to_frame FrameType \u8f6c\u6362\u540e\u7684FrameType required Returns: Type Description BarsArray \u8f6c\u6362\u540e\u7684\u884c\u60c5\u6570\u636e Source code in omicron/models/stock.py @classmethod def resample ( cls , bars : BarsArray , from_frame : FrameType , to_frame : FrameType ) -> BarsArray : \"\"\"\u5c06\u539f\u6765\u4e3a`from_frame`\u7684\u884c\u60c5\u6570\u636e\u8f6c\u6362\u4e3a`to_frame`\u7684\u884c\u60c5\u6570\u636e \u5982\u679c`to_frame`\u4e3a\u65e5\u7ebf\u6216\u8005\u5206\u949f\u7ea7\u522b\u7ebf\uff0c\u5219`from_frame`\u5fc5\u987b\u4e3a\u5206\u949f\u7ebf\uff1b\u5982\u679c`to_frame`\u4e3a\u5468\u4ee5\u4e0a\u7ea7\u522b\u7ebf\uff0c\u5219`from_frame`\u5fc5\u987b\u4e3a\u65e5\u7ebf\u3002\u5176\u5b83\u7ea7\u522b\u4e4b\u95f4\u7684\u8f6c\u6362\u4e0d\u652f\u6301\u3002 \u5982\u679c`from_frame`\u4e3a1\u5206\u949f\u7ebf\uff0c\u5219\u5fc5\u987b\u4ece9\uff1a31\u8d77\u3002 Args: bars (BarsArray): \u884c\u60c5\u6570\u636e from_frame (FrameType): \u8f6c\u6362\u524d\u7684FrameType to_frame (FrameType): \u8f6c\u6362\u540e\u7684FrameType Returns: BarsArray: \u8f6c\u6362\u540e\u7684\u884c\u60c5\u6570\u636e \"\"\" if from_frame == FrameType . MIN1 : return cls . _resample_from_min1 ( bars , to_frame ) elif from_frame == FrameType . DAY : # pragma: no cover return cls . _resample_from_day ( bars , to_frame ) else : # pragma: no cover raise TypeError ( f \"unsupported from_frame: { from_frame } \" ) reset_cache () async classmethod \u00b6 \u6e05\u9664\u7f13\u5b58\u7684\u884c\u60c5\u6570\u636e Source code in omicron/models/stock.py @classmethod async def reset_cache ( cls ): \"\"\"\u6e05\u9664\u7f13\u5b58\u7684\u884c\u60c5\u6570\u636e\"\"\" try : for ft in itertools . chain ( tf . minute_level_frames , tf . day_level_frames ): keys = await cache . security . keys ( f \"bars: { ft . value } :*\" ) if keys : await cache . security . delete ( * keys ) finally : cls . 
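`qfq` performs the forward adjustment by scaling every price and volume field by `factor / last_factor`, so the most recent bar is left unchanged and earlier bars are rebased to it. A standalone numpy sketch of the same rule on a reduced field set (the real method also adjusts `high` and `low`):

```
import numpy as np

DT = np.dtype([("open", "f4"), ("close", "f4"), ("volume", "f8"), ("factor", "f4")])

def qfq(bars: np.ndarray) -> np.ndarray:
    """Forward-adjust in place: scale each field by factor / last_factor."""
    if bars.size == 0:
        return bars
    last = bars[-1]["factor"]
    for field in ["open", "close", "volume"]:
        bars[field] = bars[field] * (bars["factor"] / last)
    return bars

bars = np.array(
    [(10.0, 10.2, 1_000_000.0, 1.0), (10.3, 10.4, 900_000.0, 2.0)], dtype=DT
)
print(qfq(bars))  # the first bar is scaled by 0.5; the latest bar is unchanged
```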
_is_cache_empty = True save_trade_price_limits ( price_limits , to_cache ) async classmethod \u00b6 \u4fdd\u5b58\u6da8\u8dcc\u505c\u4ef7 Parameters: Name Type Description Default price_limits numpy.ndarray[Any, numpy.dtype[dtype([('frame', 'O'), ('code', 'O'), ('high_limit', ' Tuple [ List [ bool ]]: \"\"\"\u83b7\u53d6\u4e2a\u80a1\u5728[start, end]\u4e4b\u95f4\u7684\u6da8\u8dcc\u505c\u6807\u5fd7 !!!Note \u672c\u51fd\u6570\u8fd4\u56de\u7684\u5e8f\u5217\u5728\u80a1\u7968\u6709\u505c\u724c\u7684\u60c5\u51b5\u4e0b\uff0c\u5c06\u4e0d\u80fd\u4e0e[start, end]\u4e00\u4e00\u5bf9\u5e94\u3002 Args: code: \u4e2a\u80a1\u4ee3\u7801 start: \u5f00\u59cb\u65e5\u671f end: \u7ed3\u675f\u65e5\u671f Returns: \u6da8\u8dcc\u505c\u6807\u5fd7\u5217\u8868(buy, sell) \"\"\" cols = [ \"_time\" , \"close\" , \"high_limit\" , \"low_limit\" ] client = get_influx_client () measurement = cls . _measurement_name ( FrameType . DAY ) flux = ( Flux () . bucket ( client . _bucket ) . measurement ( measurement ) . range ( start , end ) . tags ({ \"code\" : code }) . fields ( cols ) . sort ( \"_time\" ) ) dtype = [ ( \"frame\" , \"O\" ), ( \"close\" , \"f4\" ), ( \"high_limit\" , \"f4\" ), ( \"low_limit\" , \"f4\" ), ] ds = NumpyDeserializer ( dtype , use_cols = [ \"_time\" , \"close\" , \"high_limit\" , \"low_limit\" ], converters = { \"_time\" : lambda x : ciso8601 . parse_datetime ( x ) . date ()}, # since we ask parse date in convertors, so we have to disable parse_date parse_date = None , ) result = await client . query ( flux , ds ) if result . size == 0 : return np . array ([], dtype = dtype ) return ( array_price_equal ( result [ \"close\" ], result [ \"high_limit\" ]), array_price_equal ( result [ \"close\" ], result [ \"low_limit\" ]), ) trade_price_limit_flags_ex ( code , start , end ) async classmethod \u00b6 \u83b7\u53d6\u80a1\u7968 code \u5728 [start, end] \u533a\u95f4\u7684\u6da8\u8dcc\u505c\u6807\u5fd7 Note \u5982\u679cend\u4e3a\u5f53\u5929\uff0c\u6ce8\u610f\u5728\u672a\u6536\u76d8\u4e4b\u524d\uff0c\u8fd9\u4e2a\u6da8\u8dcc\u505c\u6807\u5fd7\u90fd\u662f\u4e0d\u7a33\u5b9a\u7684 Parameters: Name Type Description Default code str \u80a1\u7968\u4ee3\u7801 required start date \u8d77\u59cb\u65e5\u671f required end date \u7ed3\u675f\u65e5\u671f required Returns: Type Description Dict[datetime.date, Tuple[bool, bool]] \u4ee5\u65e5\u671f\u4e3akey\uff0c\uff08\u6da8\u505c\uff0c\u8dcc\u505c\uff09\u4e3a\u503c\u7684dict Source code in omicron/models/stock.py @classmethod async def trade_price_limit_flags_ex ( cls , code : str , start : datetime . date , end : datetime . date ) -> Dict [ datetime . date , Tuple [ bool , bool ]]: \"\"\"\u83b7\u53d6\u80a1\u7968`code`\u5728`[start, end]`\u533a\u95f4\u7684\u6da8\u8dcc\u505c\u6807\u5fd7 !!!Note \u5982\u679cend\u4e3a\u5f53\u5929\uff0c\u6ce8\u610f\u5728\u672a\u6536\u76d8\u4e4b\u524d\uff0c\u8fd9\u4e2a\u6da8\u8dcc\u505c\u6807\u5fd7\u90fd\u662f\u4e0d\u7a33\u5b9a\u7684 Args: code: \u80a1\u7968\u4ee3\u7801 start: \u8d77\u59cb\u65e5\u671f end: \u7ed3\u675f\u65e5\u671f Returns: \u4ee5\u65e5\u671f\u4e3akey\uff0c\uff08\u6da8\u505c\uff0c\u8dcc\u505c\uff09\u4e3a\u503c\u7684dict \"\"\" limit_prices = await cls . get_trade_price_limits ( code , start , end ) bars = await Stock . get_bars_in_range ( code , FrameType . DAY , start = start , end = end , fq = False ) close = bars [ \"close\" ] results = {} # aligned = True for i in range ( len ( bars )): if bars [ i ][ \"frame\" ] . item () . date () != limit_prices [ i ][ \"frame\" ]: # aligned = False logger . 
warning ( \"\u6570\u636e\u540c\u6b65\u9519\u8bef\uff0c\u6da8\u8dcc\u505c\u4ef7\u683c\u4e0e\u6536\u76d8\u4ef7\u65f6\u95f4\u4e0d\u4e00\u81f4: %s , %s \" , code , bars [ i ][ \"frame\" ]) break results [ limit_prices [ i ][ \"frame\" ]] = ( price_equal ( limit_prices [ i ][ \"high_limit\" ], close [ i ]), price_equal ( limit_prices [ i ][ \"low_limit\" ], close [ i ]), ) # if not aligned: # bars = bars[i:] # limit_prices = limit_prices[i:] # for frame in bars[\"frame\"]: # frame = frame.item().date() # close = bars[bars[\"frame\"].item().date() == frame][\"close\"].item() # high = limit_prices[limit_prices[\"frame\"] == frame][\"high_limit\"].item() # low = limit_prices[limit_prices[\"frame\"] == frame][\"low_limit\"].item() # results[frame] = ( # price_equal(high, close), # price_equal(low, close) # ) return results","title":"stock"},{"location":"api/stock/#omicron.models.stock.Stock","text":"Stock\u5bf9\u8c61\u7528\u4e8e\u5f52\u96c6\u67d0\u652f\u8bc1\u5238\uff08\u80a1\u7968\u548c\u6307\u6570\uff0c\u4e0d\u5305\u62ec\u5176\u5b83\u6295\u8d44\u54c1\u79cd\uff09\u7684\u76f8\u5173\u4fe1\u606f\uff0c\u6bd4\u5982\u884c\u60c5\u6570\u636e\uff08OHLC\u7b49\uff09\u3001\u5e02\u503c\u6570\u636e\u3001\u6240\u5c5e\u6982\u5ff5\u5206\u7c7b\u7b49\u3002 Source code in omicron/models/stock.py class Stock ( Security ): \"\"\" Stock\u5bf9\u8c61\u7528\u4e8e\u5f52\u96c6\u67d0\u652f\u8bc1\u5238\uff08\u80a1\u7968\u548c\u6307\u6570\uff0c\u4e0d\u5305\u62ec\u5176\u5b83\u6295\u8d44\u54c1\u79cd\uff09\u7684\u76f8\u5173\u4fe1\u606f\uff0c\u6bd4\u5982\u884c\u60c5\u6570\u636e\uff08OHLC\u7b49\uff09\u3001\u5e02\u503c\u6570\u636e\u3001\u6240\u5c5e\u6982\u5ff5\u5206\u7c7b\u7b49\u3002 \"\"\" _is_cache_empty = True def __init__ ( self , code : str ): self . _code = code self . _stock = self . get_stock ( code ) assert self . _stock , \"\u7cfb\u7edf\u4e2d\u4e0d\u5b58\u5728\u8be5code\" ( _ , self . _display_name , self . _name , ipo , end , _type ) = self . _stock self . _start_date = convert_nptime_to_datetime ( ipo ) . date () self . _end_date = convert_nptime_to_datetime ( end ) . date () self . _type = SecurityType ( _type ) @classmethod def choose_listed ( cls , dt : datetime . date , types : List [ str ] = [ \"stock\" , \"index\" ]): cond = np . array ([ False ] * len ( cls . _stocks )) dt = datetime . datetime . combine ( dt , datetime . time ()) for type_ in types : cond |= cls . _stocks [ \"type\" ] == type_ result = cls . _stocks [ cond ] result = result [ result [ \"end\" ] > dt ] result = result [ result [ \"ipo\" ] <= dt ] # result = np.array(result, dtype=cls.stock_info_dtype) return result [ \"code\" ] . tolist () @classmethod def fuzzy_match ( cls , query : str ) -> Dict [ str , Tuple ]: \"\"\"\u5bf9\u80a1\u7968/\u6307\u6570\u8fdb\u884c\u6a21\u7cca\u5339\u914d\u67e5\u627e query\u53ef\u4ee5\u662f\u80a1\u7968/\u6307\u6570\u4ee3\u7801\uff0c\u4e5f\u53ef\u4ee5\u662f\u5b57\u6bcd\uff08\u6309name\u67e5\u627e\uff09\uff0c\u4e5f\u53ef\u4ee5\u662f\u6c49\u5b57\uff08\u6309\u663e\u793a\u540d\u67e5\u627e\uff09 Args: query (str): \u67e5\u8be2\u5b57\u7b26\u4e32 Returns: Dict[str, Tuple]: \u67e5\u8be2\u7ed3\u679c\uff0c\u5176\u4e2dTuple\u4e3a(code, display_name, name, start, end, type) \"\"\" query = query . upper () if re . match ( r \"\\d+\" , query ): return { sec [ \"code\" ]: sec . tolist () for sec in cls . _stocks if sec [ \"code\" ] . startswith ( query ) } elif re . match ( r \"[A-Z]+\" , query ): return { sec [ \"code\" ]: sec . tolist () for sec in cls . _stocks if sec [ \"name\" ] . 
startswith ( query ) } else : return { sec [ \"code\" ]: sec . tolist () for sec in cls . _stocks if sec [ \"alias\" ] . find ( query ) != - 1 } def __str__ ( self ): return f \" { self . display_name } [ { self . code } ]\" @property def ipo_date ( self ) -> datetime . date : return self . _start_date @property def display_name ( self ) -> str : return self . _display_name @property def name ( self ) -> str : return self . _name @property def end_date ( self ) -> datetime . date : return self . _end_date @property def code ( self ) -> str : return self . _code @property def sim_code ( self ) -> str : return re . sub ( r \"\\.XSH[EG]\" , \"\" , self . code ) @property def security_type ( self ) -> SecurityType : \"\"\"\u8fd4\u56de\u8bc1\u5238\u7c7b\u578b Returns: SecurityType: [description] \"\"\" return self . _type @staticmethod def simplify_code ( code ) -> str : return re . sub ( r \"\\.XSH[EG]\" , \"\" , code ) @staticmethod def format_code ( code ) -> str : \"\"\"\u65b0\u4e09\u677f\u548c\u5317\u4ea4\u6240\u7684\u80a1\u7968, \u6682\u4e0d\u652f\u6301, \u9ed8\u8ba4\u8fd4\u56deNone \u4e0a\u8bc1A\u80a1: 600\u3001601\u3001603\u3001605 \u6df1\u8bc1A\u80a1: 000\u3001001 \u4e2d\u5c0f\u677f: 002\u3001003 \u521b\u4e1a\u677f: 300/301 \u79d1\u521b\u677f: 688 \u65b0\u4e09\u677f: 82\u300183\u300187\u300188\u3001430\u3001420\u3001400 \u5317\u4ea4\u6240: 43\u300183\u300187\u300188 \"\"\" if not code or len ( code ) != 6 : return None prefix = code [ 0 ] if prefix in ( \"0\" , \"3\" ): return f \" { code } .XSHE\" elif prefix == \"6\" : return f \" { code } .XSHG\" else : return None def days_since_ipo ( self ) -> int : \"\"\"\u83b7\u53d6\u4e0a\u5e02\u4ee5\u6765\u7ecf\u8fc7\u4e86\u591a\u5c11\u4e2a\u4ea4\u6613\u65e5 \u7531\u4e8e\u53d7\u4ea4\u6613\u65e5\u5386\u9650\u5236\uff082005\u5e741\u67084\u65e5\u4e4b\u524d\u7684\u4ea4\u6613\u65e5\u5386\u6ca1\u6709\uff09\uff0c\u5bf9\u4e8e\u5728\u4e4b\u524d\u4e0a\u5e02\u7684\u54c1\u79cd\uff0c\u90fd\u8fd4\u56de\u4ece2005\u5e741\u67084\u65e5\u8d77\u7684\u65e5\u671f\u3002 Returns: int: [description] \"\"\" epoch_start = arrow . get ( \"2005-01-04\" ) . date () ipo_day = self . ipo_date if self . ipo_date > epoch_start else epoch_start return tf . count_day_frames ( ipo_day , arrow . now () . date ()) @staticmethod def qfq ( bars : BarsArray ) -> BarsArray : \"\"\"\u5bf9\u884c\u60c5\u6570\u636e\u6267\u884c\u524d\u590d\u6743\u64cd\u4f5c\"\"\" # todo: \u8fd9\u91cc\u53ef\u4ee5\u4f18\u5316 if bars . 
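`fuzzy_match` searches the in-memory securities list by code prefix, by letter-based name, or by Chinese display name, and `choose_listed` filters that list down to the securities listed (and not yet delisted) on a given date. A usage sketch; it assumes `omicron.init()` has loaded the securities list, and the query strings are only illustrative:

```
import asyncio
import datetime

import omicron
from omicron.models.stock import Stock

async def main():
    await omicron.init()  # loads the securities list these class methods search
    print(list(Stock.fuzzy_match("60000").keys()))  # match by code prefix
    print(list(Stock.fuzzy_match("PAYH").keys()))   # match by letter-based name (hypothetical query)
    listed = Stock.choose_listed(datetime.date(2023, 1, 3), ["stock"])
    print(len(listed))                              # number of stocks listed on that date
    await omicron.close()

asyncio.run(main())
```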
size == 0 : return bars last = bars [ - 1 ][ \"factor\" ] for field in [ \"open\" , \"high\" , \"low\" , \"close\" , \"volume\" ]: bars [ field ] = bars [ field ] * ( bars [ \"factor\" ] / last ) return bars @classmethod async def batch_get_min_level_bars_in_range ( cls , codes : List [ str ], frame_type : FrameType , start : Frame , end : Frame , fq : bool = True , ) -> Generator [ Dict [ str , BarsArray ], None , None ]: \"\"\"\u83b7\u53d6\u591a\u652f\u80a1\u7968\uff08\u6307\u6570\uff09\u5728[start, end)\u65f6\u95f4\u6bb5\u5185\u7684\u884c\u60c5\u6570\u636e \u5982\u679c\u8981\u83b7\u53d6\u7684\u884c\u60c5\u6570\u636e\u662f\u5206\u949f\u7ea7\u522b\uff08\u53731m, 5m, 15m, 30m\u548c60m)\uff0c\u4f7f\u7528\u672c\u63a5\u53e3\u3002 \u505c\u724c\u6570\u636e\u5904\u7406\u8bf7\u89c1[get_bars][omicron.models.stock.Stock.get_bars]\u3002 \u672c\u51fd\u6570\u8fd4\u56de\u4e00\u4e2a\u8fed\u4ee3\u5668\uff0c\u4f7f\u7528\u65b9\u6cd5\u793a\u4f8b\uff1a ``` async for code, bars in Stock.batch_get_min_level_bars_in_range(...): print(code, bars) ``` \u5982\u679c`end`\u4e0d\u5728`frame_type`\u6240\u5c5e\u7684\u8fb9\u754c\u70b9\u4e0a\uff0c\u90a3\u4e48\uff0c\u5982\u679c`end`\u5927\u4e8e\u7b49\u4e8e\u5f53\u524d\u7f13\u5b58\u672a\u6536\u76d8\u6570\u636e\u65f6\u95f4\uff0c\u5219\u5c06\u5305\u542b\u672a\u6536\u76d8\u6570\u636e\uff1b\u5426\u5219\uff0c\u8fd4\u56de\u7684\u8bb0\u5f55\u5c06\u622a\u6b62\u5230`tf.floor(end, frame_type)`\u3002 Args: codes: \u80a1\u7968/\u6307\u6570\u4ee3\u7801\u5217\u8868 frame_type: \u5e27\u7c7b\u578b start: \u8d77\u59cb\u65f6\u95f4 end: \u7ed3\u675f\u65f6\u95f4\u3002\u5982\u679c\u672a\u6307\u660e\uff0c\u5219\u53d6\u5f53\u524d\u65f6\u95f4\u3002 fq: \u662f\u5426\u8fdb\u884c\u590d\u6743\uff0c\u5982\u679c\u662f\uff0c\u5219\u8fdb\u884c\u524d\u590d\u6743\u3002Defaults to True. Returns: Generator[Dict[str, BarsArray], None, None]: \u8fed\u4ee3\u5668\uff0c\u6bcf\u6b21\u8fd4\u56de\u4e00\u4e2a\u5b57\u5178\uff0c\u5176\u4e2dkey\u4e3a\u4ee3\u7801\uff0cvalue\u4e3a\u884c\u60c5\u6570\u636e \"\"\" closed_end = tf . floor ( end , frame_type ) n = tf . count_frames ( start , closed_end , frame_type ) max_query_size = min ( cfg . influxdb . max_query_size , INFLUXDB_MAX_QUERY_SIZE ) batch_size = max ( 1 , max_query_size // n ) ff = tf . first_min_frame ( datetime . datetime . now (), frame_type ) for i in range ( 0 , len ( codes ), batch_size ): batch_codes = codes [ i : i + batch_size ] if end < ff : part1 = await cls . _batch_get_persisted_bars_in_range ( batch_codes , frame_type , start , end ) part2 = pd . DataFrame ([], columns = bars_dtype_with_code . names ) elif start >= ff : part1 = pd . DataFrame ([], columns = bars_dtype_with_code . names ) n = tf . count_frames ( start , closed_end , frame_type ) + 1 cached = await cls . _batch_get_cached_bars_n ( frame_type , n , end , batch_codes ) cached = cached [ cached [ \"frame\" ] >= start ] part2 = pd . DataFrame ( cached , columns = bars_dtype_with_code . names ) else : part1 = await cls . _batch_get_persisted_bars_in_range ( batch_codes , frame_type , start , ff ) n = tf . count_frames ( start , closed_end , frame_type ) + 1 cached = await cls . _batch_get_cached_bars_n ( frame_type , n , end , batch_codes ) part2 = pd . DataFrame ( cached , columns = bars_dtype_with_code . names ) df = pd . concat ([ part1 , part2 ]) for code in batch_codes : filtered = df [ df [ \"code\" ] == code ][ bars_cols ] bars = filtered . to_records ( index = False ) . astype ( bars_dtype ) if fq : bars = cls . 
qfq ( bars ) yield code , bars @classmethod async def batch_get_day_level_bars_in_range ( cls , codes : List [ str ], frame_type : FrameType , start : Frame , end : Frame , fq : bool = True , ) -> Generator [ Dict [ str , BarsArray ], None , None ]: \"\"\"\u83b7\u53d6\u591a\u652f\u80a1\u7968\uff08\u6307\u6570\uff09\u5728[start, end)\u65f6\u95f4\u6bb5\u5185\u7684\u884c\u60c5\u6570\u636e \u5982\u679c\u8981\u83b7\u53d6\u7684\u884c\u60c5\u6570\u636e\u662f\u65e5\u7ebf\u7ea7\u522b\uff08\u53731d, 1w, 1M)\uff0c\u4f7f\u7528\u672c\u63a5\u53e3\u3002 \u505c\u724c\u6570\u636e\u5904\u7406\u8bf7\u89c1[get_bars][omicron.models.stock.Stock.get_bars]\u3002 \u672c\u51fd\u6570\u8fd4\u56de\u4e00\u4e2a\u8fed\u4ee3\u5668\uff0c\u4f7f\u7528\u65b9\u6cd5\u793a\u4f8b\uff1a ``` async for code, bars in Stock.batch_get_day_level_bars_in_range(...): print(code, bars) ``` \u5982\u679c`end`\u4e0d\u5728`frame_type`\u6240\u5c5e\u7684\u8fb9\u754c\u70b9\u4e0a\uff0c\u90a3\u4e48\uff0c\u5982\u679c`end`\u5927\u4e8e\u7b49\u4e8e\u5f53\u524d\u7f13\u5b58\u672a\u6536\u76d8\u6570\u636e\u65f6\u95f4\uff0c\u5219\u5c06\u5305\u542b\u672a\u6536\u76d8\u6570\u636e\uff1b\u5426\u5219\uff0c\u8fd4\u56de\u7684\u8bb0\u5f55\u5c06\u622a\u6b62\u5230`tf.floor(end, frame_type)`\u3002 Args: codes: \u4ee3\u7801\u5217\u8868 frame_type: \u5e27\u7c7b\u578b start: \u8d77\u59cb\u65f6\u95f4 end: \u7ed3\u675f\u65f6\u95f4 fq: \u662f\u5426\u8fdb\u884c\u590d\u6743\uff0c\u5982\u679c\u662f\uff0c\u5219\u8fdb\u884c\u524d\u590d\u6743\u3002Defaults to True. Returns: Generator[Dict[str, BarsArray], None, None]: \u8fed\u4ee3\u5668\uff0c\u6bcf\u6b21\u8fd4\u56de\u4e00\u4e2a\u5b57\u5178\uff0c\u5176\u4e2dkey\u4e3a\u4ee3\u7801\uff0cvalue\u4e3a\u884c\u60c5\u6570\u636e \"\"\" today = datetime . datetime . now () . date () # \u65e5\u7ebf\uff0cend\u4e0d\u7b49\u4e8e\u6700\u540e\u4ea4\u6613\u65e5\uff0c\u6b64\u65f6\u5df2\u65e0\u7f13\u5b58 if frame_type == FrameType . DAY and end == tf . floor ( today , frame_type ): from_cache = True elif frame_type != FrameType . DAY and start > tf . floor ( today , frame_type ): from_cache = True else : from_cache = False n = tf . count_frames ( start , end , frame_type ) max_query_size = min ( cfg . influxdb . max_query_size , INFLUXDB_MAX_QUERY_SIZE ) batch_size = max ( max_query_size // n , 1 ) for i in range ( 0 , len ( codes ), batch_size ): batch_codes = codes [ i : i + batch_size ] persisted = await cls . _batch_get_persisted_bars_in_range ( batch_codes , frame_type , start , end ) if from_cache : cached = await cls . _batch_get_cached_bars_n ( frame_type , 1 , end , batch_codes ) cached = pd . DataFrame ( cached , columns = bars_dtype_with_code . names ) df = pd . concat ([ persisted , cached ]) else : df = persisted for code in batch_codes : filtered = df [ df [ \"code\" ] == code ][ bars_cols ] bars = filtered . to_records ( index = False ) . astype ( bars_dtype ) if fq : bars = cls . 
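A fuller version of the `async for` usage shown in the docstring above, with concrete but hypothetical codes and dates. It assumes `FrameType` is importable from `coretypes` (whose `bars_dtype` is referenced throughout these docs) and that `omicron.init()` must be awaited first so the cache and InfluxDB connections exist — both are assumptions, not confirmed by this page.

```
import asyncio
import datetime

import omicron                      # assumption: omicron.init() sets up cache/InfluxDB connections
from coretypes import FrameType     # assumption: FrameType is exported by coretypes
from omicron.models.stock import Stock


async def main():
    await omicron.init()
    codes = ["000001.XSHE", "600000.XSHG"]   # hypothetical codes
    start = datetime.date(2023, 1, 3)        # hypothetical range
    end = datetime.date(2023, 1, 31)
    async for code, bars in Stock.batch_get_day_level_bars_in_range(
        codes, FrameType.DAY, start, end, fq=True
    ):
        print(code, len(bars))


asyncio.run(main())
```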
qfq ( bars ) yield code , bars @classmethod async def get_bars_in_range ( cls , code : str , frame_type : FrameType , start : Frame , end : Frame = None , fq = True , unclosed = True , ) -> BarsArray : \"\"\"\u83b7\u53d6\u6307\u5b9a\u8bc1\u5238\uff08`code`\uff09\u5728[`start`, `end`]\u671f\u95f4\u5e27\u7c7b\u578b\u4e3a`frame_type`\u7684\u884c\u60c5\u6570\u636e\u3002 Args: code : \u8bc1\u5238\u4ee3\u7801 frame_type : \u884c\u60c5\u6570\u636e\u7684\u5e27\u7c7b\u578b start : \u8d77\u59cb\u65f6\u95f4 end : \u7ed3\u675f\u65f6\u95f4,\u5982\u679c\u4e3aNone\uff0c\u5219\u8868\u660e\u53d6\u5230\u5f53\u524d\u65f6\u95f4\u3002 fq : \u662f\u5426\u5bf9\u884c\u60c5\u6570\u636e\u6267\u884c\u524d\u590d\u6743\u64cd\u4f5c unclosed : \u662f\u5426\u5305\u542b\u672a\u6536\u76d8\u7684\u6570\u636e \"\"\" now = datetime . datetime . now () if frame_type in tf . day_level_frames : end = end or now . date () if unclosed and tf . day_shift ( end , 0 ) == now . date (): part2 = await cls . _get_cached_bars_n ( code , 1 , frame_type ) else : part2 = np . array ([], dtype = bars_dtype ) # get rest from persisted part1 = await cls . _get_persisted_bars_in_range ( code , frame_type , start , end ) bars = np . concatenate (( part1 , part2 )) else : end = end or now closed_end = tf . floor ( end , frame_type ) ff_min1 = tf . first_min_frame ( now , FrameType . MIN1 ) if tf . day_shift ( end , 0 ) < now . date () or end < ff_min1 : part1 = await cls . _get_persisted_bars_in_range ( code , frame_type , start , end ) part2 = np . array ([], dtype = bars_dtype ) elif start >= ff_min1 : # all in cache part1 = np . array ([], dtype = bars_dtype ) n = tf . count_frames ( start , closed_end , frame_type ) + 1 part2 = await cls . _get_cached_bars_n ( code , n , frame_type , end ) part2 = part2 [ part2 [ \"frame\" ] >= start ] else : # in both cache and persisted ff = tf . first_min_frame ( now , frame_type ) part1 = await cls . _get_persisted_bars_in_range ( code , frame_type , start , ff ) n = tf . count_frames ( ff , closed_end , frame_type ) + 1 part2 = await cls . _get_cached_bars_n ( code , n , frame_type , end ) if not unclosed : part2 = part2 [ part2 [ \"frame\" ] <= closed_end ] bars = np . concatenate (( part1 , part2 )) if fq : return cls . 
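A minimal sketch of `get_bars_in_range` for a day-level window, meant to run inside an async context (for example the `main()` coroutine in the earlier sketch) after initialization; the code and dates are hypothetical.

```
import datetime

bars = await Stock.get_bars_in_range(
    "000001.XSHE",
    FrameType.DAY,
    start=datetime.date(2023, 1, 3),
    end=datetime.date(2023, 1, 31),
    fq=True,
    unclosed=False,
)
print(len(bars), bars["frame"][0], bars["frame"][-1])
```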
qfq ( bars ) else : return bars @classmethod async def get_bars ( cls , code : str , n : int , frame_type : FrameType , end : Frame = None , fq = True , unclosed = True , ) -> BarsArray : \"\"\"\u83b7\u53d6\u5230`end`\u4e3a\u6b62\u7684`n`\u4e2a\u884c\u60c5\u6570\u636e\u3002 \u8fd4\u56de\u7684\u6570\u636e\u662f\u6309\u7167\u65f6\u95f4\u987a\u5e8f\u9012\u589e\u6392\u5e8f\u7684\u3002\u5728\u9047\u5230\u505c\u724c\u7684\u60c5\u51b5\u65f6\uff0c\u8be5\u65f6\u6bb5\u6570\u636e\u5c06\u88ab\u8df3\u8fc7\uff0c\u56e0\u6b64\u8fd4\u56de\u7684\u8bb0\u5f55\u53ef\u80fd\u4e0d\u662f\u4ea4\u6613\u65e5\u8fde\u7eed\u7684\uff0c\u5e76\u4e14\u53ef\u80fd\u4e0d\u8db3`n`\u4e2a\u3002 \u5982\u679c\u7cfb\u7edf\u5f53\u524d\u6ca1\u6709\u5230\u6307\u5b9a\u65f6\u95f4`end`\u7684\u6570\u636e\uff0c\u5c06\u5c3d\u6700\u5927\u52aa\u529b\u8fd4\u56de\u6570\u636e\u3002\u8c03\u7528\u8005\u53ef\u4ee5\u901a\u8fc7\u5224\u65ad\u6700\u540e\u4e00\u6761\u6570\u636e\u7684\u65f6\u95f4\u662f\u5426\u7b49\u4e8e`end`\u6765\u5224\u65ad\u662f\u5426\u83b7\u53d6\u5230\u4e86\u5168\u90e8\u6570\u636e\u3002 Args: code: \u8bc1\u5238\u4ee3\u7801 n: \u8bb0\u5f55\u6570 frame_type: \u5e27\u7c7b\u578b end: \u622a\u6b62\u65f6\u95f4,\u5982\u679c\u672a\u6307\u660e\uff0c\u5219\u53d6\u5f53\u524d\u65f6\u95f4 fq: \u662f\u5426\u5bf9\u8fd4\u56de\u8bb0\u5f55\u8fdb\u884c\u590d\u6743\u3002\u5982\u679c\u4e3a`True`\u7684\u8bdd\uff0c\u5219\u8fdb\u884c\u524d\u590d\u6743\u3002Defaults to True. unclosed: \u662f\u5426\u5305\u542b\u6700\u65b0\u672a\u6536\u76d8\u7684\u6570\u636e\uff1f Defaults to True. Returns: \u8fd4\u56dedtype\u4e3a`coretypes.bars_dtype`\u7684\u4e00\u7ef4numpy\u6570\u7ec4\u3002 \"\"\" now = datetime . datetime . now () try : cached = np . array ([], dtype = bars_dtype ) if frame_type in tf . day_level_frames : if end is None : end = now . date () elif type ( end ) == datetime . datetime : end = end . date () n0 = n if unclosed : cached = await cls . _get_cached_bars_n ( code , 1 , frame_type ) if cached . size > 0 : # \u5982\u679c\u7f13\u5b58\u7684\u672a\u6536\u76d8\u65e5\u671f > end\uff0c\u5219\u8be5\u7f13\u5b58\u4e0d\u662f\u9700\u8981\u7684 if cached [ 0 ][ \"frame\" ] . item () . date () > end : cached = np . array ([], dtype = bars_dtype ) else : n0 = n - 1 else : end = end or now closed_frame = tf . floor ( end , frame_type ) # fetch one more bar, in case we should discard unclosed bar cached = await cls . _get_cached_bars_n ( code , n + 1 , frame_type , end ) if not unclosed : cached = cached [ cached [ \"frame\" ] <= closed_frame ] # n bars we need fetch from persisted db n0 = n - cached . size if n0 > 0 : if cached . size > 0 : end0 = cached [ 0 ][ \"frame\" ] . item () else : end0 = end bars = await cls . _get_persisted_bars_n ( code , frame_type , n0 , end0 ) merged = np . concatenate (( bars , cached )) bars = merged [ - n :] else : bars = cached [ - n :] if fq : bars = cls . qfq ( bars ) return bars except Exception as e : logger . exception ( e ) logger . 
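A minimal sketch of `get_bars` as documented above, again to be awaited inside an async context after initialization; the code is hypothetical.

```
# Latest 30 thirty-minute bars, forward-adjusted, including the unclosed bar.
bars = await Stock.get_bars("000001.XSHE", 30, FrameType.MIN30, fq=True, unclosed=True)
if bars.size:
    # bars is a numpy structured array (coretypes.bars_dtype), ascending by frame;
    # it may hold fewer than 30 rows if the security was suspended.
    print(bars[-1]["frame"], bars[-1]["close"])
```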
warning ( \"failed to get bars for %s , %s , %s , %s \" , code , n , frame_type , end ) raise @classmethod async def _get_persisted_bars_in_range ( cls , code : str , frame_type : FrameType , start : Frame , end : Frame = None ) -> BarsArray : \"\"\"\u4ece\u6301\u4e45\u5316\u6570\u636e\u5e93\u4e2d\u83b7\u53d6\u4ecb\u4e8e[`start`, `end`]\u95f4\u7684\u884c\u60c5\u8bb0\u5f55 \u5982\u679c`start`\u5230`end`\u533a\u95f4\u67d0\u652f\u80a1\u7968\u505c\u724c\uff0c\u5219\u4f1a\u8fd4\u56de\u7a7a\u6570\u7ec4\u3002 Args: code: \u8bc1\u5238\u4ee3\u7801 frame_type: \u5e27\u7c7b\u578b start: \u8d77\u59cb\u65f6\u95f4 end: \u7ed3\u675f\u65f6\u95f4\uff0c\u5982\u679c\u672a\u6307\u660e\uff0c\u5219\u53d6\u5f53\u524d\u65f6\u95f4 Returns: \u8fd4\u56dedtype\u4e3a`coretypes.bars_dtype`\u7684\u4e00\u7ef4numpy\u6570\u7ec4\u3002 \"\"\" end = end or datetime . datetime . now () keep_cols = [ \"_time\" ] + list ( bars_cols [ 1 :]) measurement = cls . _measurement_name ( frame_type ) flux = ( Flux () . bucket ( cfg . influxdb . bucket_name ) . range ( start , end ) . measurement ( measurement ) . fields ( keep_cols ) . tags ({ \"code\" : code }) ) serializer = DataframeDeserializer ( encoding = \"utf-8\" , names = [ \"_\" , \"table\" , \"result\" , \"frame\" , \"code\" , \"amount\" , \"close\" , \"factor\" , \"high\" , \"low\" , \"open\" , \"volume\" , ], engine = \"c\" , skiprows = 0 , header = 0 , usecols = bars_cols , parse_dates = [ \"frame\" ], ) client = get_influx_client () result = await client . query ( flux , serializer ) return result . to_records ( index = False ) . astype ( bars_dtype ) @classmethod async def _get_persisted_bars_n ( cls , code : str , frame_type : FrameType , n : int , end : Frame = None ) -> BarsArray : \"\"\"\u4ece\u6301\u4e45\u5316\u6570\u636e\u5e93\u4e2d\u83b7\u53d6\u622a\u6b62\u5230`end`\u7684`n`\u6761\u884c\u60c5\u8bb0\u5f55 \u5982\u679c`end`\u672a\u6307\u5b9a\uff0c\u5219\u53d6\u5f53\u524d\u65f6\u95f4\u3002 \u57fa\u4e8einfluxdb\u67e5\u8be2\u7684\u7279\u6027\uff0c\u5728\u67e5\u8be2\u524d\uff0c\u5fc5\u987b\u5148\u6839\u636e`end`\u548c`n`\u8ba1\u7b97\u51fa\u8d77\u59cb\u65f6\u95f4\uff0c\u4f46\u5982\u679c\u5728\u6b64\u671f\u95f4\u67d0\u4e9b\u80a1\u7968\u6709\u505c\u724c\uff0c\u5219\u65e0\u6cd5\u8fd4\u56de\u7684\u6570\u636e\u5c06\u5c0f\u4e8e`n`\u3002\u800c\u5982\u679c\u8d77\u59cb\u65f6\u95f4\u8bbe\u7f6e\u5f97\u8db3\u591f\u65e9\uff0c\u867d\u7136\u80fd\u6ee1\u8db3\u8fd4\u56de\u6570\u636e\u6761\u6570\u7684\u8981\u6c42\uff0c\u4f46\u4f1a\u5e26\u6765\u6027\u80fd\u4e0a\u7684\u635f\u5931\u3002\u56e0\u6b64\uff0c\u6211\u4eec\u5728\u8ba1\u7b97\u8d77\u59cb\u65f6\u95f4\u65f6\uff0c\u4e0d\u662f\u4f7f\u7528`n`\u6765\u8ba1\u7b97\uff0c\u800c\u662f\u4f7f\u7528\u4e86`min(n * 2, n + 20)`\u6765\u8ba1\u7b97\u8d77\u59cb\u65f6\u95f4\uff0c\u8fd9\u6837\u591a\u6570\u60c5\u51b5\u4e0b\uff0c\u80fd\u591f\u4fdd\u8bc1\u8fd4\u56de\u6570\u636e\u7684\u6761\u6570\u4e3a`n`\u6761\u3002 \u8fd4\u56de\u7684\u6570\u636e\u6309`frame`\u8fdb\u884c\u5347\u5e8f\u6392\u5217\u3002 Args: code: \u8bc1\u5238\u4ee3\u7801 frame_type: \u5e27\u7c7b\u578b n: \u8fd4\u56de\u7ed3\u679c\u6570\u91cf end: \u7ed3\u675f\u65f6\u95f4\uff0c\u5982\u679c\u672a\u6307\u660e\uff0c\u5219\u53d6\u5f53\u524d\u65f6\u95f4 Returns: \u8fd4\u56dedtype\u4e3a`bars_dtype`\u7684numpy\u6570\u7ec4 \"\"\" # check is needed since tags accept List as well assert isinstance ( code , str ), \"`code` must be a string\" end = end or datetime . datetime . now () closed_end = tf . floor ( end , frame_type ) start = tf . 
shift ( closed_end , - min ( 2 * n , n + 20 ), frame_type ) keep_cols = [ \"_time\" ] + list ( bars_cols [ 1 :]) measurement = cls . _measurement_name ( frame_type ) flux = ( Flux () . bucket ( cfg . influxdb . bucket_name ) . range ( start , end ) . measurement ( measurement ) . fields ( keep_cols ) . tags ({ \"code\" : code }) . latest ( n ) ) serializer = DataframeDeserializer ( encoding = \"utf-8\" , names = [ \"_\" , \"table\" , \"result\" , \"frame\" , \"code\" , \"amount\" , \"close\" , \"factor\" , \"high\" , \"low\" , \"open\" , \"volume\" , ], engine = \"c\" , skiprows = 0 , header = 0 , usecols = bars_cols , parse_dates = [ \"frame\" ], ) client = get_influx_client () result = await client . query ( flux , serializer ) return result . to_records ( index = False ) . astype ( bars_dtype ) @classmethod async def _batch_get_persisted_bars_n ( cls , codes : List [ str ], frame_type : FrameType , n : int , end : Frame = None ) -> pd . DataFrame : \"\"\"\u4ece\u6301\u4e45\u5316\u5b58\u50a8\u4e2d\u83b7\u53d6`codes`\u6307\u5b9a\u7684\u4e00\u6279\u80a1\u7968\u622a\u6b62`end`\u65f6\u7684`n`\u6761\u8bb0\u5f55\u3002 \u8fd4\u56de\u7684\u6570\u636e\u6309`frame`\u8fdb\u884c\u5347\u5e8f\u6392\u5217\u3002\u5982\u679c\u4e0d\u5b58\u5728\u6ee1\u8db3\u6307\u5b9a\u6761\u4ef6\u7684\u67e5\u8be2\u7ed3\u679c\uff0c\u5c06\u8fd4\u56de\u7a7a\u7684DataFrame\u3002 \u57fa\u4e8einfluxdb\u67e5\u8be2\u7684\u7279\u6027\uff0c\u5728\u67e5\u8be2\u524d\uff0c\u5fc5\u987b\u5148\u6839\u636e`end`\u548c`n`\u8ba1\u7b97\u51fa\u8d77\u59cb\u65f6\u95f4\uff0c\u4f46\u5982\u679c\u5728\u6b64\u671f\u95f4\u67d0\u4e9b\u80a1\u7968\u6709\u505c\u724c\uff0c\u5219\u65e0\u6cd5\u8fd4\u56de\u7684\u6570\u636e\u5c06\u5c0f\u4e8e`n`\u3002\u5982\u679c\u8d77\u59cb\u65f6\u95f4\u8bbe\u7f6e\u7684\u8db3\u591f\u65e9\uff0c\u867d\u7136\u80fd\u6ee1\u8db3\u8fd4\u56de\u6570\u636e\u6761\u6570\u7684\u8981\u6c42\uff0c\u4f46\u4f1a\u5e26\u6765\u6027\u80fd\u4e0a\u7684\u635f\u5931\u3002\u56e0\u6b64\uff0c\u6211\u4eec\u5728\u8ba1\u7b97\u8d77\u59cb\u65f6\u95f4\u65f6\uff0c\u4e0d\u662f\u4f7f\u7528`n`\u6765\u8ba1\u7b97\uff0c\u800c\u662f\u4f7f\u7528\u4e86`min(n * 2, n + 20)`\u6765\u8ba1\u7b97\u8d77\u59cb\u65f6\u95f4\uff0c\u8fd9\u6837\u591a\u6570\u60c5\u51b5\u4e0b\uff0c\u80fd\u591f\u4fdd\u8bc1\u8fd4\u56de\u6570\u636e\u7684\u6761\u6570\u4e3a`n`\u6761\u3002 Args: codes: \u8bc1\u5238\u4ee3\u7801\u5217\u8868\u3002 frame_type: \u5e27\u7c7b\u578b n: \u8fd4\u56de\u7ed3\u679c\u6570\u91cf end: \u7ed3\u675f\u65f6\u95f4\uff0c\u5982\u679c\u672a\u6307\u5b9a\uff0c\u5219\u4f7f\u7528\u5f53\u524d\u65f6\u95f4 Returns: DataFrame, columns\u4e3a`code`, `frame`, `open`, `high`, `low`, `close`, `volume`, `amount`, `factor` \"\"\" max_query_size = min ( cfg . influxdb . max_query_size , INFLUXDB_MAX_QUERY_SIZE ) if len ( codes ) * min ( n + 20 , 2 * n ) > max_query_size : raise BadParameterError ( f \"codes\u7684\u6570\u91cf\u548cn\u7684\u4e58\u79ef\u8d85\u8fc7\u4e86influxdb\u7684\u6700\u5927\u67e5\u8be2\u6570\u91cf\u9650\u5236 { max_query_size } \" ) end = end or datetime . datetime . now () close_end = tf . floor ( end , frame_type ) begin = tf . 
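The look-back padding and batch sizing described above are simple arithmetic; a sketch with hypothetical numbers only, to make the sizing concrete.

```
# To return n bars ending at `end`, the persisted-store query starts
# min(2 * n, n + 20) frames earlier, so suspended trading days usually
# still leave at least n rows in the result.
n = 10
lookback = min(2 * n, n + 20)        # 20 frames of history are scanned

# Batch queries are additionally capped by the InfluxDB query-size limit:
max_query_size = 250_000             # hypothetical cfg.influxdb.max_query_size
frames_per_code = 120                # hypothetical frame count between start and end
batch_size = max(1, max_query_size // frames_per_code)   # codes per Flux query
```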
shift ( close_end , - 1 * min ( n + 20 , n * 2 ), frame_type ) # influxdb\u7684\u67e5\u8be2\u7ed3\u679c\u683c\u5f0f\u7c7b\u4f3c\u4e8eCSV\uff0c\u5176\u5217\u987a\u5e8f\u4e3a_, result_alias, table_seq, _time, tags, fields,\u5176\u4e2dtags\u548cfields\u90fd\u662f\u5347\u5e8f\u6392\u5217 keep_cols = [ \"code\" ] + list ( bars_cols ) names = [ \"_\" , \"result\" , \"table\" , \"frame\" , \"code\" ] # influxdb will return fields in the order of name ascending parallel names . extend ( sorted ( bars_cols [ 1 :])) measurement = cls . _measurement_name ( frame_type ) flux = ( Flux () . bucket ( cfg . influxdb . bucket_name ) . range ( begin , end ) . measurement ( measurement ) . fields ( keep_cols ) . latest ( n ) ) if codes is not None : assert isinstance ( codes , list ), \"`codes` must be a list or None\" flux . tags ({ \"code\" : codes }) deserializer = DataframeDeserializer ( names = names , usecols = keep_cols , encoding = \"utf-8\" , time_col = \"frame\" , engine = \"c\" , ) client = get_influx_client () return await client . query ( flux , deserializer ) @classmethod async def _batch_get_persisted_bars_in_range ( cls , codes : List [ str ], frame_type : FrameType , begin : Frame , end : Frame = None ) -> pd . DataFrame : \"\"\"\u4ece\u6301\u4e45\u5316\u5b58\u50a8\u4e2d\u83b7\u53d6`codes`\u6307\u5b9a\u7684\u4e00\u6279\u80a1\u7968\u5728`begin`\u548c`end`\u4e4b\u95f4\u7684\u8bb0\u5f55\u3002 \u8fd4\u56de\u7684\u6570\u636e\u5c06\u6309`frame`\u8fdb\u884c\u5347\u5e8f\u6392\u5217\u3002 \u6ce8\u610f\uff0c\u8fd4\u56de\u7684\u6570\u636e\u6709\u53ef\u80fd\u4e0d\u662f\u7b49\u957f\u7684\uff0c\u56e0\u4e3a\u6709\u7684\u80a1\u7968\u53ef\u80fd\u505c\u724c\u3002 Args: codes: \u8bc1\u5238\u4ee3\u7801\u5217\u8868\u3002 frame_type: \u5e27\u7c7b\u578b begin: \u5f00\u59cb\u65f6\u95f4 end: \u7ed3\u675f\u65f6\u95f4 Returns: DataFrame, columns\u4e3a`code`, `frame`, `open`, `high`, `low`, `close`, `volume`, `amount`, `factor` \"\"\" end = end or datetime . datetime . now () n = tf . count_frames ( begin , end , frame_type ) max_query_size = min ( cfg . influxdb . max_query_size , INFLUXDB_MAX_QUERY_SIZE ) if len ( codes ) * n > max_query_size : raise BadParameterError ( f \"asked records is { len ( codes ) * n } , which is too large than { max_query_size } \" ) # influxdb\u7684\u67e5\u8be2\u7ed3\u679c\u683c\u5f0f\u7c7b\u4f3c\u4e8eCSV\uff0c\u5176\u5217\u987a\u5e8f\u4e3a_, result_alias, table_seq, _time, tags, fields,\u5176\u4e2dtags\u548cfields\u90fd\u662f\u5347\u5e8f\u6392\u5217 keep_cols = [ \"code\" ] + list ( bars_cols ) names = [ \"_\" , \"result\" , \"table\" , \"frame\" , \"code\" ] # influxdb will return fields in the order of name ascending parallel names . extend ( sorted ( bars_cols [ 1 :])) measurement = cls . _measurement_name ( frame_type ) flux = ( Flux () . bucket ( cfg . influxdb . bucket_name ) . range ( begin , end ) . measurement ( measurement ) . fields ( keep_cols ) ) flux . tags ({ \"code\" : codes }) deserializer = DataframeDeserializer ( names = names , usecols = keep_cols , encoding = \"utf-8\" , time_col = \"frame\" , engine = \"c\" , ) client = get_influx_client () df = await client . 
query ( flux , deserializer ) return df @classmethod async def batch_cache_bars ( cls , frame_type : FrameType , bars : Dict [ str , BarsArray ]): \"\"\"\u7f13\u5b58\u5df2\u6536\u76d8\u7684\u5206\u949f\u7ebf\u548c\u65e5\u7ebf \u5f53\u7f13\u5b58\u65e5\u7ebf\u65f6\uff0c\u4ec5\u9650\u4e8e\u5f53\u65e5\u6536\u76d8\u540e\u7684\u7b2c\u4e00\u6b21\u540c\u6b65\u65f6\u8c03\u7528\u3002 Args: frame_type: \u5e27\u7c7b\u578b bars: \u884c\u60c5\u6570\u636e\uff0c\u5176key\u4e3a\u80a1\u7968\u4ee3\u7801\uff0c\u5176value\u4e3adtype\u4e3a`bars_dtype`\u7684\u4e00\u7ef4numpy\u6570\u7ec4\u3002 Raises: RedisError: \u5982\u679c\u5728\u6267\u884c\u8fc7\u7a0b\u4e2d\u53d1\u751f\u9519\u8bef\uff0c\u5219\u629b\u51fa\u4ee5\u6b64\u5f02\u5e38\u4e3a\u57fa\u7c7b\u7684\u5404\u79cd\u5f02\u5e38\uff0c\u5177\u4f53\u53c2\u8003aioredis\u76f8\u5173\u6587\u6863\u3002 \"\"\" if frame_type == FrameType . DAY : await cls . batch_cache_unclosed_bars ( frame_type , bars ) return pl = cache . security . pipeline () for code , bars in bars . items (): key = f \"bars: { frame_type . value } : { code } \" for bar in bars : frame = tf . time2int ( bar [ \"frame\" ] . item ()) val = [ * bar ] val [ 0 ] = frame pl . hset ( key , frame , \",\" . join ( map ( str , val ))) await pl . execute () @classmethod async def batch_cache_unclosed_bars ( cls , frame_type : FrameType , bars : Dict [ str , BarsArray ] ): # pragma: no cover \"\"\"\u7f13\u5b58\u672a\u6536\u76d8\u76845\u300115\u300130\u300160\u5206\u949f\u7ebf\u53ca\u65e5\u7ebf\u3001\u5468\u7ebf\u3001\u6708\u7ebf Args: frame_type: \u5e27\u7c7b\u578b bars: \u884c\u60c5\u6570\u636e\uff0c\u5176key\u4e3a\u80a1\u7968\u4ee3\u7801\uff0c\u5176value\u4e3adtype\u4e3a`bars_dtype`\u7684\u4e00\u7ef4numpy\u6570\u7ec4\u3002bars\u4e0d\u80fd\u4e3aNone\uff0c\u6216\u8005empty\u3002 Raise: RedisError: \u5982\u679c\u5728\u6267\u884c\u8fc7\u7a0b\u4e2d\u53d1\u751f\u9519\u8bef\uff0c\u5219\u629b\u51fa\u4ee5\u6b64\u5f02\u5e38\u4e3a\u57fa\u7c7b\u7684\u5404\u79cd\u5f02\u5e38\uff0c\u5177\u4f53\u53c2\u8003aioredis\u76f8\u5173\u6587\u6863\u3002 \"\"\" pl = cache . security . pipeline () key = f \"bars: { frame_type . value } :unclosed\" convert = tf . time2int if frame_type in tf . minute_level_frames else tf . date2int for code , bar in bars . items (): val = [ * bar [ 0 ]] val [ 0 ] = convert ( bar [ \"frame\" ][ 0 ] . item ()) # \u65f6\u95f4\u8f6c\u6362 pl . hset ( key , code , \",\" . join ( map ( str , val ))) await pl . execute () @classmethod async def reset_cache ( cls ): \"\"\"\u6e05\u9664\u7f13\u5b58\u7684\u884c\u60c5\u6570\u636e\"\"\" try : for ft in itertools . chain ( tf . minute_level_frames , tf . day_level_frames ): keys = await cache . security . keys ( f \"bars: { ft . value } :*\" ) if keys : await cache . security . delete ( * keys ) finally : cls . _is_cache_empty = True @classmethod def _deserialize_cached_bars ( cls , raw : List [ str ], ft : FrameType ) -> BarsArray : \"\"\"\u4eceredis\u4e2d\u53cd\u5e8f\u5217\u5316\u7f13\u5b58\u7684\u6570\u636e \u5982\u679c`raw`\u7a7a\u6570\u7ec4\u6216\u8005\u5143\u7d20\u4e3a`None`\uff0c\u5219\u8fd4\u56de\u7a7a\u6570\u7ec4\u3002 Args: raw: redis\u4e2d\u7684\u7f13\u5b58\u6570\u636e ft: \u5e27\u7c7b\u578b sort: \u662f\u5426\u9700\u8981\u91cd\u65b0\u6392\u5e8f\uff0c\u7f3a\u7701\u4e3aFalse Returns: BarsArray: \u884c\u60c5\u6570\u636e \"\"\" fix_date = False if ft in tf . minute_level_frames : convert = tf . int2time else : convert = tf . 
int2date fix_date = True recs = [] # it's possible to treat raw as csv and use pandas to parse, however, the performance is 10 times worse than this method for raw_rec in raw : if raw_rec is None : continue f , o , h , l , c , v , m , fac = raw_rec . split ( \",\" ) if fix_date : f = f [: 8 ] recs . append ( ( convert ( f ), float ( o ), float ( h ), float ( l ), float ( c ), float ( v ), float ( m ), float ( fac ), ) ) return np . array ( recs , dtype = bars_dtype ) @classmethod async def _batch_get_cached_bars_n ( cls , frame_type : FrameType , n : int , end : Frame = None , codes : List [ str ] = None ) -> BarsPanel : \"\"\"\u6279\u91cf\u83b7\u53d6\u5728cache\u4e2d\u622a\u6b62`end`\u7684`n`\u4e2abars\u3002 \u5982\u679c`end`\u4e0d\u5728`frame_type`\u6240\u5c5e\u7684\u8fb9\u754c\u70b9\u4e0a\uff0c\u90a3\u4e48\uff0c\u5982\u679c`end`\u5927\u4e8e\u7b49\u4e8e\u5f53\u524d\u7f13\u5b58\u672a\u6536\u76d8\u6570\u636e\u65f6\u95f4\uff0c\u5219\u5c06\u5305\u542b\u672a\u6536\u76d8\u6570\u636e\uff1b\u5426\u5219\uff0c\u8fd4\u56de\u7684\u8bb0\u5f55\u5c06\u622a\u6b62\u5230`tf.floor(end, frame_type)`\u3002 Args: frame_type: \u65f6\u95f4\u5e27\u7c7b\u578b n: \u8fd4\u56de\u8bb0\u5f55\u6761\u6570 codes: \u8bc1\u5238\u4ee3\u7801\u5217\u8868 end: \u622a\u6b62\u65f6\u95f4, \u5982\u679c\u4e3aNone Returns: BarsPanel: \u884c\u60c5\u6570\u636e \"\"\" # \u8c03\u7528\u8005\u81ea\u5df1\u4fdd\u8bc1end\u5728\u7f13\u5b58\u4e2d cols = list ( bars_dtype_with_code . names ) if frame_type in tf . day_level_frames : key = f \"bars: { frame_type . value } :unclosed\" if codes is None : recs = await cache . security . hgetall ( key ) codes = list ( recs . keys ()) recs = recs . values () else : recs = await cache . security . hmget ( key , * codes ) barss = cls . _deserialize_cached_bars ( recs , frame_type ) if barss . size > 0 : if len ( barss ) != len ( codes ): # issue 39, \u5982\u679c\u67d0\u652f\u7968\u5f53\u5929\u505c\u724c\uff0c\u5219\u7f13\u5b58\u4e2d\u5c06\u4e0d\u4f1a\u6709\u5b83\u7684\u8bb0\u5f55\uff0c\u6b64\u65f6\u9700\u8981\u79fb\u9664\u5176\u4ee3\u7801 codes = [ codes [ i ] for i , item in enumerate ( recs ) if item is not None ] barss = numpy_append_fields ( barss , \"code\" , codes , [( \"code\" , \"O\" )]) return barss [ cols ] . astype ( bars_dtype_with_code ) else : return np . array ([], dtype = bars_dtype_with_code ) else : end = end or datetime . datetime . now () close_end = tf . floor ( end , frame_type ) all_bars = [] if codes is None : keys = await cache . security . keys ( f \"bars: { frame_type . value } :*[^unclosed]\" ) codes = [ key . split ( \":\" )[ - 1 ] for key in keys ] else : keys = [ f \"bars: { frame_type . value } : { code } \" for code in codes ] if frame_type != FrameType . MIN1 : unclosed = await cache . security . hgetall ( f \"bars: { frame_type . value } :unclosed\" ) else : unclosed = {} pl = cache . security . pipeline () frames = tf . get_frames_by_count ( close_end , n , frame_type ) for key in keys : pl . hmget ( key , * frames ) all_closed = await pl . execute () for code , raw in zip ( codes , all_closed ): raw . append ( unclosed . get ( code )) barss = cls . _deserialize_cached_bars ( raw , frame_type ) barss = numpy_append_fields ( barss , \"code\" , [ code ] * len ( barss ), [( \"code\" , \"O\" )] ) barss = barss [ cols ] . astype ( bars_dtype_with_code ) all_bars . append ( barss [ barss [ \"frame\" ] <= end ][ - n :]) try : return np . concatenate ( all_bars ) except ValueError as e : logger . exception ( e ) return np . 
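The Redis record format parsed by `_deserialize_cached_bars` above is a comma-joined string of frame, open, high, low, close, volume, amount, factor. A sketch of decoding one such record by hand; the raw value is hypothetical and `strptime` stands in for `tf.int2time`.

```
import datetime
import numpy as np
from coretypes import bars_dtype

raw = "202301031000,10.20,10.50,10.10,10.40,123456.0,1267890.0,1.0"   # hypothetical cached value
f, o, h, l, c, v, m, fac = raw.split(",")
frame = datetime.datetime.strptime(f, "%Y%m%d%H%M")    # stand-in for tf.int2time
rec = np.array(
    [(frame, float(o), float(h), float(l), float(c), float(v), float(m), float(fac))],
    dtype=bars_dtype,
)
print(rec["close"])   # [10.4]
```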
array ([], dtype = bars_dtype_with_code ) @classmethod async def _get_cached_bars_n ( cls , code : str , n : int , frame_type : FrameType , end : Frame = None ) -> BarsArray : \"\"\"\u4ece\u7f13\u5b58\u4e2d\u83b7\u53d6\u6307\u5b9a\u4ee3\u7801\u7684\u884c\u60c5\u6570\u636e \u5b58\u53d6\u903b\u8f91\u662f\uff0c\u4ece`end`\u6307\u5b9a\u7684\u65f6\u95f4\u5411\u524d\u53d6`n`\u6761\u8bb0\u5f55\u3002`end`\u4e0d\u5e94\u8be5\u5927\u4e8e\u5f53\u524d\u7cfb\u7edf\u65f6\u95f4\uff0c\u5e76\u4e14\u6839\u636e`end`\u548c`n`\u8ba1\u7b97\u51fa\u6765\u7684\u8d77\u59cb\u65f6\u95f4\u5e94\u8be5\u5728\u7f13\u5b58\u4e2d\u5b58\u5728\u3002\u5426\u5219\uff0c\u4e24\u79cd\u60c5\u51b5\u4e0b\uff0c\u8fd4\u56de\u8bb0\u5f55\u6570\u90fd\u5c06\u5c0f\u4e8e`n`\u3002 \u5982\u679c`end`\u4e0d\u5904\u4e8e`frame_type`\u6240\u5c5e\u7684\u8fb9\u754c\u7ed3\u675f\u4f4d\u7f6e\uff0c\u4e14\u5c0f\u4e8e\u5f53\u524d\u5df2\u7f13\u5b58\u7684\u672a\u6536\u76d8bar\u65f6\u95f4\uff0c\u5219\u4f1a\u8fd4\u56de\u524d\u4e00\u4e2a\u5df2\u6536\u76d8\u7684\u6570\u636e\uff0c\u5426\u5219\uff0c\u8fd4\u56de\u7684\u8bb0\u5f55\u4e2d\u8fd8\u5c06\u5305\u542b\u672a\u6536\u76d8\u7684\u6570\u636e\u3002 args: code: \u8bc1\u5238\u4ee3\u7801\uff0c\u6bd4\u5982000001.XSHE n: \u8fd4\u56de\u8bb0\u5f55\u6761\u6570 frame_type: \u5e27\u7c7b\u578b end: \u7ed3\u675f\u5e27\uff0c\u5982\u679c\u4e3aNone\uff0c\u5219\u53d6\u5f53\u524d\u65f6\u95f4 returns: \u5143\u7d20\u7c7b\u578b\u4e3a`coretypes.bars_dtype`\u7684\u4e00\u7ef4numpy\u6570\u7ec4\u3002\u5982\u679c\u6ca1\u6709\u6570\u636e\uff0c\u5219\u8fd4\u56de\u7a7andarray\u3002 \"\"\" # 50 times faster than arrow.now().floor('second') end = end or datetime . datetime . now () . replace ( second = 0 , microsecond = 0 ) if frame_type in tf . minute_level_frames : cache_start = tf . first_min_frame ( end . date (), frame_type ) closed = tf . floor ( end , frame_type ) frames = ( tf . get_frames ( cache_start , closed , frame_type ))[ - n :] if len ( frames ) == 0 : recs = np . empty ( shape = ( 0 ,), dtype = bars_dtype ) else : key = f \"bars: { frame_type . value } : { code } \" recs = await cache . security . hmget ( key , * frames ) recs = cls . _deserialize_cached_bars ( recs , frame_type ) if closed < end : # for unclosed key = f \"bars: { frame_type . value } :unclosed\" unclosed = await cache . security . hget ( key , code ) unclosed = cls . _deserialize_cached_bars ([ unclosed ], frame_type ) if len ( unclosed ) == 0 : return recs [ - n :] if end < unclosed [ 0 ][ \"frame\" ] . item (): # \u5982\u679cunclosed\u4e3a9:36, \u8c03\u7528\u8005\u8981\u6c42\u53d69:29\u76845m\u6570\u636e\uff0c\u5219\u53d6\u5230\u7684unclosed\u4e0d\u5408\u8981\u6c42\uff0c\u629b\u5f03\u3002\u4f3c\u4e4e\u6ca1\u6709\u66f4\u597d\u7684\u65b9\u6cd5\u68c0\u6d4bend\u4e0eunclosed\u7684\u5173\u7cfb return recs [ - n :] else : bars = np . concatenate (( recs , unclosed )) return bars [ - n :] else : return recs [ - n :] else : # \u65e5\u7ebf\u53ca\u4ee5\u4e0a\u7ea7\u522b\uff0c\u4ec5\u5728\u7f13\u5b58\u4e2d\u5b58\u5728\u672a\u6536\u76d8\u6570\u636e key = f \"bars: { frame_type . value } :unclosed\" rec = await cache . security . hget ( key , code ) return cls . 
_deserialize_cached_bars ([ rec ], frame_type ) @classmethod async def cache_bars ( cls , code : str , frame_type : FrameType , bars : BarsArray ): \"\"\"\u5c06\u5f53\u671f\u5df2\u6536\u76d8\u7684\u884c\u60c5\u6570\u636e\u7f13\u5b58 Note: \u5f53\u524d\u53ea\u7f13\u5b581\u5206\u949f\u6570\u636e\u3002\u5176\u5b83\u5206\u949f\u6570\u636e\uff0c\u90fd\u5728\u8c03\u7528\u65f6\uff0c\u901a\u8fc7resample\u4e34\u65f6\u5408\u6210\u3002 \u884c\u60c5\u6570\u636e\u7f13\u5b58\u5728\u4ee5`bars:{frame_type.value}:{code}`\u4e3akey, {frame}\u4e3afield\u7684hashmap\u4e2d\u3002 Args: code: the full qualified code of a security or index frame_type: frame type of the bars bars: the bars to cache, which is a numpy array of dtype `coretypes.bars_dtype` Raises: RedisError: if redis operation failed, see documentation of aioredis \"\"\" # \u8f6c\u6362\u65f6\u95f4\u4e3aint convert = tf . time2int if frame_type in tf . minute_level_frames else tf . date2int key = f \"bars: { frame_type . value } : { code } \" pl = cache . security . pipeline () for bar in bars : val = [ * bar ] val [ 0 ] = convert ( bar [ \"frame\" ] . item ()) pl . hset ( key , val [ 0 ], \",\" . join ( map ( str , val ))) await pl . execute () @classmethod async def cache_unclosed_bars ( cls , code : str , frame_type : FrameType , bars : BarsArray ): # pragma: no cover \"\"\"\u5c06\u672a\u7ed3\u675f\u7684\u884c\u60c5\u6570\u636e\u7f13\u5b58 \u672a\u7ed3\u675f\u7684\u884c\u60c5\u6570\u636e\u7f13\u5b58\u5728\u4ee5`bars:{frame_type.value}:unclosed`\u4e3akey, {code}\u4e3afield\u7684hashmap\u4e2d\u3002 \u5c3d\u7ba1`bars`\u88ab\u58f0\u660e\u4e3aBarsArray\uff0c\u4f46\u5b9e\u9645\u4e0a\u5e94\u8be5\u53ea\u5305\u542b\u4e00\u4e2a\u5143\u7d20\u3002 Args: code: the full qualified code of a security or index frame_type: frame type of the bars bars: the bars to cache, which is a numpy array of dtype `coretypes.bars_dtype` Raises: RedisError: if redis operation failed, see documentation of aioredis \"\"\" converter = tf . time2int if frame_type in tf . minute_level_frames else tf . date2int assert len ( bars ) == 1 , \"unclosed bars should only have one record\" key = f \"bars: { frame_type . value } :unclosed\" bar = bars [ 0 ] val = [ * bar ] val [ 0 ] = converter ( bar [ \"frame\" ] . item ()) await cache . security . hset ( key , code , \",\" . join ( map ( str , val ))) @classmethod async def persist_bars ( cls , frame_type : FrameType , bars : Union [ Dict [ str , BarsArray ], BarsArray , pd . DataFrame ], ): \"\"\"\u5c06\u884c\u60c5\u6570\u636e\u6301\u4e45\u5316 \u5982\u679c`bars`\u7c7b\u578b\u4e3aDict,\u5219key\u4e3a`code`\uff0cvalue\u4e3a`bars`\u3002\u5982\u679c\u5176\u7c7b\u578b\u4e3aBarsArray\u6216\u8005pd.DataFrame\uff0c\u5219`bars`\u5404\u5217\u5b57\u6bb5\u5e94\u8be5\u4e3a`coretypes.bars_dtype` + (\"code\", \"O\")\u6784\u6210\u3002 Args: frame_type: the frame type of the bars bars: the bars to be persisted Raises: InfluxDBWriteError: if influxdb write failed \"\"\" client = get_influx_client () measurement = cls . _measurement_name ( frame_type ) logger . info ( \"persisting bars to influxdb: %s , %d secs\" , measurement , len ( bars )) if isinstance ( bars , dict ): for code , value in bars . items (): await client . save ( value , measurement , global_tags = { \"code\" : code }, time_key = \"frame\" ) else : await client . 
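A sketch of handing closed bars to the caching and persistence entry points above, inside an async context after initialization; the single synthetic bar and the codes are hypothetical.

```
import datetime
import numpy as np
from coretypes import bars_dtype, FrameType
from omicron.models.stock import Stock

# One synthetic, already-closed 1-minute bar per code (hypothetical values).
bar = np.array(
    [(datetime.datetime(2023, 1, 3, 9, 31), 10.0, 10.1, 9.9, 10.05, 1000.0, 10050.0, 1.0)],
    dtype=bars_dtype,
)
bars_by_code = {"000001.XSHE": bar, "600000.XSHG": bar}

await Stock.batch_cache_bars(FrameType.MIN1, bars_by_code)   # closed bars -> Redis hash per code
await Stock.persist_bars(FrameType.MIN1, bars_by_code)       # long-term storage -> InfluxDB
```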
save ( bars , measurement , tag_keys = [ \"code\" ], time_key = \"frame\" ) @classmethod def resample ( cls , bars : BarsArray , from_frame : FrameType , to_frame : FrameType ) -> BarsArray : \"\"\"\u5c06\u539f\u6765\u4e3a`from_frame`\u7684\u884c\u60c5\u6570\u636e\u8f6c\u6362\u4e3a`to_frame`\u7684\u884c\u60c5\u6570\u636e \u5982\u679c`to_frame`\u4e3a\u65e5\u7ebf\u6216\u8005\u5206\u949f\u7ea7\u522b\u7ebf\uff0c\u5219`from_frame`\u5fc5\u987b\u4e3a\u5206\u949f\u7ebf\uff1b\u5982\u679c`to_frame`\u4e3a\u5468\u4ee5\u4e0a\u7ea7\u522b\u7ebf\uff0c\u5219`from_frame`\u5fc5\u987b\u4e3a\u65e5\u7ebf\u3002\u5176\u5b83\u7ea7\u522b\u4e4b\u95f4\u7684\u8f6c\u6362\u4e0d\u652f\u6301\u3002 \u5982\u679c`from_frame`\u4e3a1\u5206\u949f\u7ebf\uff0c\u5219\u5fc5\u987b\u4ece9\uff1a31\u8d77\u3002 Args: bars (BarsArray): \u884c\u60c5\u6570\u636e from_frame (FrameType): \u8f6c\u6362\u524d\u7684FrameType to_frame (FrameType): \u8f6c\u6362\u540e\u7684FrameType Returns: BarsArray: \u8f6c\u6362\u540e\u7684\u884c\u60c5\u6570\u636e \"\"\" if from_frame == FrameType . MIN1 : return cls . _resample_from_min1 ( bars , to_frame ) elif from_frame == FrameType . DAY : # pragma: no cover return cls . _resample_from_day ( bars , to_frame ) else : # pragma: no cover raise TypeError ( f \"unsupported from_frame: { from_frame } \" ) @classmethod def _measurement_name ( cls , frame_type ): return f \"stock_bars_ { frame_type . value } \" @classmethod def _resample_from_min1 ( cls , bars : BarsArray , to_frame : FrameType ) -> BarsArray : \"\"\"\u5c06`bars`\u4ece1\u5206\u949f\u7ebf\u8f6c\u6362\u4e3a`to_frame`\u7684\u884c\u60c5\u6570\u636e \u91cd\u91c7\u6837\u540e\u7684\u6570\u636e\u53ea\u5305\u542bframe, open, high, low, close, volume, amount, factor\uff0c\u65e0\u8bba\u4f20\u5165\u6570\u636e\u662f\u5426\u8fd8\u6709\u522b\u7684\u5b57\u6bb5\uff0c\u5b83\u4eec\u90fd\u5c06\u88ab\u4e22\u5f03\u3002 resampling 240\u6839\u5206\u949f\u7ebf\u52305\u5206\u949f\u5927\u7ea6\u9700\u8981100\u5fae\u79d2\u3002 TODO\uff1a \u5982\u679c`bars`\u4e2d\u5305\u542bnan\u600e\u4e48\u5904\u7406\uff1f \"\"\" if bars [ 0 ][ \"frame\" ] . item () . minute != 31 : raise ValueError ( \"resampling from 1min must start from 9:31\" ) if to_frame not in ( FrameType . MIN5 , FrameType . MIN15 , FrameType . MIN30 , FrameType . MIN60 , FrameType . DAY , ): raise ValueError ( f \"unsupported to_frame: { to_frame } \" ) bins_len = { FrameType . MIN5 : 5 , FrameType . MIN15 : 15 , FrameType . MIN30 : 30 , FrameType . MIN60 : 60 , FrameType . DAY : 240 , }[ to_frame ] bins = len ( bars ) // bins_len npart1 = bins * bins_len part1 = bars [: npart1 ] . reshape (( - 1 , bins_len )) part2 = bars [ npart1 :] open_pos = np . arange ( bins ) * bins_len close_pos = np . arange ( 1 , bins + 1 ) * bins_len - 1 if len ( bars ) > bins_len * bins : close_pos = np . append ( close_pos , len ( bars ) - 1 ) resampled = np . empty (( bins + 1 ,), dtype = bars_dtype ) else : resampled = np . empty (( bins ,), dtype = bars_dtype ) resampled [: bins ][ \"open\" ] = bars [ open_pos ][ \"open\" ] resampled [: bins ][ \"high\" ] = np . max ( part1 [ \"high\" ], axis = 1 ) resampled [: bins ][ \"low\" ] = np . min ( part1 [ \"low\" ], axis = 1 ) resampled [: bins ][ \"volume\" ] = np . sum ( part1 [ \"volume\" ], axis = 1 ) resampled [: bins ][ \"amount\" ] = np . sum ( part1 [ \"amount\" ], axis = 1 ) if len ( part2 ): resampled [ - 1 ][ \"open\" ] = part2 [ \"open\" ][ 0 ] resampled [ - 1 ][ \"high\" ] = np . max ( part2 [ \"high\" ]) resampled [ - 1 ][ \"low\" ] = np . 
min ( part2 [ \"low\" ]) resampled [ - 1 ][ \"volume\" ] = np . sum ( part2 [ \"volume\" ]) resampled [ - 1 ][ \"amount\" ] = np . sum ( part2 [ \"amount\" ]) cols = [ \"frame\" , \"close\" , \"factor\" ] resampled [ cols ] = bars [ close_pos ][ cols ] if to_frame == FrameType . DAY : resampled [ \"frame\" ] = bars [ - 1 ][ \"frame\" ] . item () . date () return resampled @classmethod def _resample_from_day ( cls , bars : BarsArray , to_frame : FrameType ) -> BarsArray : \"\"\"\u5c06`bars`\u4ece\u65e5\u7ebf\u8f6c\u6362\u6210`to_frame`\u7684\u884c\u60c5\u6570\u636e Args: bars (BarsArray): [description] to_frame (FrameType): [description] Returns: \u8f6c\u6362\u540e\u7684\u884c\u60c5\u6570\u636e \"\"\" rules = { \"frame\" : \"last\" , \"open\" : \"first\" , \"high\" : \"max\" , \"low\" : \"min\" , \"close\" : \"last\" , \"volume\" : \"sum\" , \"amount\" : \"sum\" , \"factor\" : \"last\" , } if to_frame == FrameType . WEEK : freq = \"W-Fri\" elif to_frame == FrameType . MONTH : freq = \"M\" elif to_frame == FrameType . QUARTER : freq = \"Q\" elif to_frame == FrameType . YEAR : freq = \"A\" else : raise ValueError ( f \"unsupported to_frame: { to_frame } \" ) df = pd . DataFrame ( bars ) df . index = pd . to_datetime ( bars [ \"frame\" ]) df = df . resample ( freq ) . agg ( rules ) bars = np . array ( df . to_records ( index = False ), dtype = bars_dtype ) # filter out data like (None, nan, ...) return bars [ np . isfinite ( bars [ \"close\" ])] @classmethod async def _get_price_limit_in_cache ( cls , code : str , begin : datetime . date , end : datetime . date ): date_str = await cache . _security_ . get ( TRADE_PRICE_LIMITS_DATE ) if date_str : date_in_cache = arrow . get ( date_str ) . date () if date_in_cache < begin or date_in_cache > end : return None else : return None dtype = [( \"frame\" , \"O\" ), ( \"high_limit\" , \"f4\" ), ( \"low_limit\" , \"f4\" )] hp = await cache . _security_ . hget ( TRADE_PRICE_LIMITS , f \" { code } .high_limit\" ) lp = await cache . _security_ . hget ( TRADE_PRICE_LIMITS , f \" { code } .low_limit\" ) if hp is None or lp is None : return None else : return np . array ([( date_in_cache , hp , lp )], dtype = dtype ) @classmethod async def get_trade_price_limits ( cls , code : str , begin : Frame , end : Frame ) -> BarsArray : \"\"\"\u4eceinfluxdb\u548ccache\u4e2d\u83b7\u53d6\u4e2a\u80a1\u5728[begin, end]\u4e4b\u95f4\u7684\u6da8\u8dcc\u505c\u4ef7\u3002 \u6da8\u8dcc\u505c\u4ef7\u53ea\u6709\u65e5\u7ebf\u6570\u636e\u624d\u6709\uff0c\u56e0\u6b64\uff0cFrameType\u56fa\u5b9a\u4e3aFrameType.DAY\uff0c \u5f53\u5929\u7684\u6570\u636e\u5b58\u653e\u4e8eredis\uff0c\u5982\u679c\u67e5\u8be2\u65e5\u671f\u5305\u542b\u5f53\u5929\uff08\u4ea4\u6613\u65e5\uff09\uff0c\u4ececache\u4e2d\u8bfb\u53d6\u5e76\u8ffd\u52a0\u5230\u7ed3\u679c\u4e2d Args: code : \u4e2a\u80a1\u4ee3\u7801 begin : \u5f00\u59cb\u65e5\u671f end : \u7ed3\u675f\u65e5\u671f Returns: dtype\u4e3a[('frame', 'O'), ('high_limit', 'f4'), ('low_limit', 'f4')]\u7684numpy\u6570\u7ec4 \"\"\" cols = [ \"_time\" , \"high_limit\" , \"low_limit\" ] dtype = [( \"frame\" , \"O\" ), ( \"high_limit\" , \"f4\" ), ( \"low_limit\" , \"f4\" )] if isinstance ( begin , datetime . datetime ): begin = begin . date () # \u5f3a\u5236\u8f6c\u6362\u4e3adate if isinstance ( end , datetime . datetime ): end = end . date () # \u5f3a\u5236\u8f6c\u6362\u4e3adate data_in_cache = await cls . _get_price_limit_in_cache ( code , begin , end ) client = get_influx_client () measurement = cls . _measurement_name ( FrameType . DAY ) flux = ( Flux () . 
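A self-contained sketch of `resample` as documented above, assuming `bars_dtype` and `FrameType` import from `coretypes`: ten synthetic 1-minute bars, starting at 09:31 as the resampler requires, are folded into 5-minute bars.

```
import datetime
import numpy as np
from coretypes import bars_dtype, FrameType
from omicron.models.stock import Stock

frames = [
    datetime.datetime(2023, 1, 3, 9, 31) + datetime.timedelta(minutes=i) for i in range(10)
]
bars_1m = np.array(
    [(f, 10.0, 10.2, 9.9, 10.1, 1000.0, 10100.0, 1.0) for f in frames],
    dtype=bars_dtype,
)
bars_5m = Stock.resample(bars_1m, FrameType.MIN1, FrameType.MIN5)
print(len(bars_5m))       # 2 five-minute bars
print(bars_5m["frame"])   # closing frames 09:35 and 09:40
```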
bucket ( client . _bucket ) . measurement ( measurement ) . range ( begin , end ) . tags ({ \"code\" : code }) . fields ( cols ) . sort ( \"_time\" ) ) ds = NumpyDeserializer ( dtype , use_cols = cols , converters = { \"_time\" : lambda x : ciso8601 . parse_datetime ( x ) . date ()}, # since we ask parse date in convertors, so we have to disable parse_date parse_date = None , ) result = await client . query ( flux , ds ) if data_in_cache : result = np . concatenate ([ result , data_in_cache ]) return result @classmethod async def reset_price_limits_cache ( cls , cache_only : bool , dt : datetime . date = None ): if cache_only is False : date_str = await cache . _security_ . get ( TRADE_PRICE_LIMITS_DATE ) if not date_str : return # skip clear action if date not found in cache date_in_cache = arrow . get ( date_str ) . date () if dt is None or date_in_cache != dt : # \u66f4\u65b0\u7684\u65f6\u95f4\u548ccache\u7684\u65f6\u95f4\u76f8\u540c\uff0c\u5219\u6e05\u9664cache return # skip clear action await cache . _security_ . delete ( TRADE_PRICE_LIMITS ) await cache . _security_ . delete ( TRADE_PRICE_LIMITS_DATE ) @classmethod async def save_trade_price_limits ( cls , price_limits : LimitPriceOnlyBarsArray , to_cache : bool ): \"\"\"\u4fdd\u5b58\u6da8\u8dcc\u505c\u4ef7 Args: price_limits: \u8981\u4fdd\u5b58\u7684\u6da8\u8dcc\u505c\u4ef7\u683c\u6570\u636e\u3002 to_cache: \u662f\u4fdd\u5b58\u5230\u7f13\u5b58\u4e2d\uff0c\u8fd8\u662f\u4fdd\u5b58\u5230\u6301\u4e45\u5316\u5b58\u50a8\u4e2d \"\"\" if len ( price_limits ) == 0 : return if to_cache : # \u6bcf\u4e2a\u4ea4\u6613\u65e5\u4e0a\u53489\u70b9\u66f4\u65b0\u4e24\u6b21 pl = cache . _security_ . pipeline () for row in price_limits : # .item convert np.float64 to python float pl . hset ( TRADE_PRICE_LIMITS , f \" { row [ 'code' ] } .high_limit\" , row [ \"high_limit\" ] . item (), ) pl . hset ( TRADE_PRICE_LIMITS , f \" { row [ 'code' ] } .low_limit\" , row [ \"low_limit\" ] . item (), ) dt = price_limits [ - 1 ][ \"frame\" ] pl . set ( TRADE_PRICE_LIMITS_DATE , dt . strftime ( \"%Y-%m- %d \" )) await pl . execute () else : # to influxdb\uff0c \u6bcf\u4e2a\u4ea4\u6613\u65e5\u7684\u7b2c\u4e8c\u5929\u65e9\u4e0a2\u70b9\u4fdd\u5b58 client = get_influx_client () await client . save ( price_limits , cls . _measurement_name ( FrameType . DAY ), tag_keys = \"code\" , time_key = \"frame\" , ) @classmethod async def trade_price_limit_flags ( cls , code : str , start : datetime . date , end : datetime . date ) -> Tuple [ List [ bool ]]: \"\"\"\u83b7\u53d6\u4e2a\u80a1\u5728[start, end]\u4e4b\u95f4\u7684\u6da8\u8dcc\u505c\u6807\u5fd7 !!!Note \u672c\u51fd\u6570\u8fd4\u56de\u7684\u5e8f\u5217\u5728\u80a1\u7968\u6709\u505c\u724c\u7684\u60c5\u51b5\u4e0b\uff0c\u5c06\u4e0d\u80fd\u4e0e[start, end]\u4e00\u4e00\u5bf9\u5e94\u3002 Args: code: \u4e2a\u80a1\u4ee3\u7801 start: \u5f00\u59cb\u65e5\u671f end: \u7ed3\u675f\u65e5\u671f Returns: \u6da8\u8dcc\u505c\u6807\u5fd7\u5217\u8868(buy, sell) \"\"\" cols = [ \"_time\" , \"close\" , \"high_limit\" , \"low_limit\" ] client = get_influx_client () measurement = cls . _measurement_name ( FrameType . DAY ) flux = ( Flux () . bucket ( client . _bucket ) . measurement ( measurement ) . range ( start , end ) . tags ({ \"code\" : code }) . fields ( cols ) . 
sort ( \"_time\" ) ) dtype = [ ( \"frame\" , \"O\" ), ( \"close\" , \"f4\" ), ( \"high_limit\" , \"f4\" ), ( \"low_limit\" , \"f4\" ), ] ds = NumpyDeserializer ( dtype , use_cols = [ \"_time\" , \"close\" , \"high_limit\" , \"low_limit\" ], converters = { \"_time\" : lambda x : ciso8601 . parse_datetime ( x ) . date ()}, # since we ask parse date in convertors, so we have to disable parse_date parse_date = None , ) result = await client . query ( flux , ds ) if result . size == 0 : return np . array ([], dtype = dtype ) return ( array_price_equal ( result [ \"close\" ], result [ \"high_limit\" ]), array_price_equal ( result [ \"close\" ], result [ \"low_limit\" ]), ) @classmethod async def trade_price_limit_flags_ex ( cls , code : str , start : datetime . date , end : datetime . date ) -> Dict [ datetime . date , Tuple [ bool , bool ]]: \"\"\"\u83b7\u53d6\u80a1\u7968`code`\u5728`[start, end]`\u533a\u95f4\u7684\u6da8\u8dcc\u505c\u6807\u5fd7 !!!Note \u5982\u679cend\u4e3a\u5f53\u5929\uff0c\u6ce8\u610f\u5728\u672a\u6536\u76d8\u4e4b\u524d\uff0c\u8fd9\u4e2a\u6da8\u8dcc\u505c\u6807\u5fd7\u90fd\u662f\u4e0d\u7a33\u5b9a\u7684 Args: code: \u80a1\u7968\u4ee3\u7801 start: \u8d77\u59cb\u65e5\u671f end: \u7ed3\u675f\u65e5\u671f Returns: \u4ee5\u65e5\u671f\u4e3akey\uff0c\uff08\u6da8\u505c\uff0c\u8dcc\u505c\uff09\u4e3a\u503c\u7684dict \"\"\" limit_prices = await cls . get_trade_price_limits ( code , start , end ) bars = await Stock . get_bars_in_range ( code , FrameType . DAY , start = start , end = end , fq = False ) close = bars [ \"close\" ] results = {} # aligned = True for i in range ( len ( bars )): if bars [ i ][ \"frame\" ] . item () . date () != limit_prices [ i ][ \"frame\" ]: # aligned = False logger . warning ( \"\u6570\u636e\u540c\u6b65\u9519\u8bef\uff0c\u6da8\u8dcc\u505c\u4ef7\u683c\u4e0e\u6536\u76d8\u4ef7\u65f6\u95f4\u4e0d\u4e00\u81f4: %s , %s \" , code , bars [ i ][ \"frame\" ]) break results [ limit_prices [ i ][ \"frame\" ]] = ( price_equal ( limit_prices [ i ][ \"high_limit\" ], close [ i ]), price_equal ( limit_prices [ i ][ \"low_limit\" ], close [ i ]), ) # if not aligned: # bars = bars[i:] # limit_prices = limit_prices[i:] # for frame in bars[\"frame\"]: # frame = frame.item().date() # close = bars[bars[\"frame\"].item().date() == frame][\"close\"].item() # high = limit_prices[limit_prices[\"frame\"] == frame][\"high_limit\"].item() # low = limit_prices[limit_prices[\"frame\"] == frame][\"low_limit\"].item() # results[frame] = ( # price_equal(high, close), # price_equal(low, close) # ) return results @classmethod async def get_latest_price ( cls , codes : Iterable [ str ]) -> List [ str ]: \"\"\"\u83b7\u53d6\u591a\u652f\u80a1\u7968\u7684\u6700\u65b0\u4ef7\u683c\uff08\u4ea4\u6613\u65e5\u5f53\u5929\uff09\uff0c\u6682\u4e0d\u5305\u62ec\u6307\u6570 \u4ef7\u683c\u6570\u636e\u6bcf5\u79d2\u66f4\u65b0\u4e00\u6b21\uff0c\u63a5\u53d7\u591a\u53ea\u80a1\u7968\u67e5\u8be2\uff0c\u8fd4\u56de\u6700\u540e\u7f13\u5b58\u7684\u4ef7\u683c Args: codes: \u4ee3\u7801\u5217\u8868 Returns: \u8fd4\u56de\u4e00\u4e2aList\uff0c\u4ef7\u683c\u662f\u5b57\u7b26\u5f62\u5f0f\u7684\u6d6e\u70b9\u6570\u3002 \"\"\" if not codes : return [] _raw_code_list = [] for code_str in codes : code , _ = code_str . split ( \".\" ) _raw_code_list . append ( code ) _converted_data = [] raw_data = await cache . feature . hmget ( TRADE_LATEST_PRICE , * _raw_code_list ) for _data in raw_data : if _data is None : _converted_data . append ( _data ) else : _converted_data . 
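A sketch tying together the price-limit and latest-price queries above, inside an async context after initialization; codes and dates are hypothetical.

```
import datetime

# (buy_flags, sell_flags): per-day booleans for limit-up / limit-down closes.
buy_flags, sell_flags = await Stock.trade_price_limit_flags(
    "000001.XSHE", datetime.date(2023, 1, 3), datetime.date(2023, 1, 31)
)

# Dict keyed by date, value is (limit_up, limit_down); unstable before the last day's close.
flags_by_day = await Stock.trade_price_limit_flags_ex(
    "000001.XSHE", datetime.date(2023, 1, 3), datetime.date(2023, 1, 31)
)

# Latest cached prices (refreshed roughly every 5 seconds); None for codes without a quote.
prices = await Stock.get_latest_price(["000001.XSHE", "600000.XSHG"])
```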
append ( float ( _data )) return _converted_data","title":"Stock"},{"location":"api/stock/#omicron.models.stock.Stock.security_type","text":"\u8fd4\u56de\u8bc1\u5238\u7c7b\u578b Returns: Type Description SecurityType [description]","title":"security_type"},{"location":"api/stock/#omicron.models.stock.Stock.batch_cache_bars","text":"\u7f13\u5b58\u5df2\u6536\u76d8\u7684\u5206\u949f\u7ebf\u548c\u65e5\u7ebf \u5f53\u7f13\u5b58\u65e5\u7ebf\u65f6\uff0c\u4ec5\u9650\u4e8e\u5f53\u65e5\u6536\u76d8\u540e\u7684\u7b2c\u4e00\u6b21\u540c\u6b65\u65f6\u8c03\u7528\u3002 Parameters: Name Type Description Default frame_type FrameType \u5e27\u7c7b\u578b required bars Dict[str, numpy.ndarray[Any, numpy.dtype[dtype([('frame', ' Generator [ Dict [ str , BarsArray ], None , None ]: \"\"\"\u83b7\u53d6\u591a\u652f\u80a1\u7968\uff08\u6307\u6570\uff09\u5728[start, end)\u65f6\u95f4\u6bb5\u5185\u7684\u884c\u60c5\u6570\u636e \u5982\u679c\u8981\u83b7\u53d6\u7684\u884c\u60c5\u6570\u636e\u662f\u65e5\u7ebf\u7ea7\u522b\uff08\u53731d, 1w, 1M)\uff0c\u4f7f\u7528\u672c\u63a5\u53e3\u3002 \u505c\u724c\u6570\u636e\u5904\u7406\u8bf7\u89c1[get_bars][omicron.models.stock.Stock.get_bars]\u3002 \u672c\u51fd\u6570\u8fd4\u56de\u4e00\u4e2a\u8fed\u4ee3\u5668\uff0c\u4f7f\u7528\u65b9\u6cd5\u793a\u4f8b\uff1a ``` async for code, bars in Stock.batch_get_day_level_bars_in_range(...): print(code, bars) ``` \u5982\u679c`end`\u4e0d\u5728`frame_type`\u6240\u5c5e\u7684\u8fb9\u754c\u70b9\u4e0a\uff0c\u90a3\u4e48\uff0c\u5982\u679c`end`\u5927\u4e8e\u7b49\u4e8e\u5f53\u524d\u7f13\u5b58\u672a\u6536\u76d8\u6570\u636e\u65f6\u95f4\uff0c\u5219\u5c06\u5305\u542b\u672a\u6536\u76d8\u6570\u636e\uff1b\u5426\u5219\uff0c\u8fd4\u56de\u7684\u8bb0\u5f55\u5c06\u622a\u6b62\u5230`tf.floor(end, frame_type)`\u3002 Args: codes: \u4ee3\u7801\u5217\u8868 frame_type: \u5e27\u7c7b\u578b start: \u8d77\u59cb\u65f6\u95f4 end: \u7ed3\u675f\u65f6\u95f4 fq: \u662f\u5426\u8fdb\u884c\u590d\u6743\uff0c\u5982\u679c\u662f\uff0c\u5219\u8fdb\u884c\u524d\u590d\u6743\u3002Defaults to True. Returns: Generator[Dict[str, BarsArray], None, None]: \u8fed\u4ee3\u5668\uff0c\u6bcf\u6b21\u8fd4\u56de\u4e00\u4e2a\u5b57\u5178\uff0c\u5176\u4e2dkey\u4e3a\u4ee3\u7801\uff0cvalue\u4e3a\u884c\u60c5\u6570\u636e \"\"\" today = datetime . datetime . now () . date () # \u65e5\u7ebf\uff0cend\u4e0d\u7b49\u4e8e\u6700\u540e\u4ea4\u6613\u65e5\uff0c\u6b64\u65f6\u5df2\u65e0\u7f13\u5b58 if frame_type == FrameType . DAY and end == tf . floor ( today , frame_type ): from_cache = True elif frame_type != FrameType . DAY and start > tf . floor ( today , frame_type ): from_cache = True else : from_cache = False n = tf . count_frames ( start , end , frame_type ) max_query_size = min ( cfg . influxdb . max_query_size , INFLUXDB_MAX_QUERY_SIZE ) batch_size = max ( max_query_size // n , 1 ) for i in range ( 0 , len ( codes ), batch_size ): batch_codes = codes [ i : i + batch_size ] persisted = await cls . _batch_get_persisted_bars_in_range ( batch_codes , frame_type , start , end ) if from_cache : cached = await cls . _batch_get_cached_bars_n ( frame_type , 1 , end , batch_codes ) cached = pd . DataFrame ( cached , columns = bars_dtype_with_code . names ) df = pd . concat ([ persisted , cached ]) else : df = persisted for code in batch_codes : filtered = df [ df [ \"code\" ] == code ][ bars_cols ] bars = filtered . to_records ( index = False ) . astype ( bars_dtype ) if fq : bars = cls . 
qfq ( bars ) yield code , bars","title":"batch_get_day_level_bars_in_range()"},{"location":"api/stock/#omicron.models.stock.Stock.batch_get_min_level_bars_in_range","text":"\u83b7\u53d6\u591a\u652f\u80a1\u7968\uff08\u6307\u6570\uff09\u5728[start, end)\u65f6\u95f4\u6bb5\u5185\u7684\u884c\u60c5\u6570\u636e \u5982\u679c\u8981\u83b7\u53d6\u7684\u884c\u60c5\u6570\u636e\u662f\u5206\u949f\u7ea7\u522b\uff08\u53731m, 5m, 15m, 30m\u548c60m)\uff0c\u4f7f\u7528\u672c\u63a5\u53e3\u3002 \u505c\u724c\u6570\u636e\u5904\u7406\u8bf7\u89c1 get_bars \u3002 \u672c\u51fd\u6570\u8fd4\u56de\u4e00\u4e2a\u8fed\u4ee3\u5668\uff0c\u4f7f\u7528\u65b9\u6cd5\u793a\u4f8b\uff1a 1 2 async for code, bars in Stock.batch_get_min_level_bars_in_range(...): print(code, bars) \u5982\u679c end \u4e0d\u5728 frame_type \u6240\u5c5e\u7684\u8fb9\u754c\u70b9\u4e0a\uff0c\u90a3\u4e48\uff0c\u5982\u679c end \u5927\u4e8e\u7b49\u4e8e\u5f53\u524d\u7f13\u5b58\u672a\u6536\u76d8\u6570\u636e\u65f6\u95f4\uff0c\u5219\u5c06\u5305\u542b\u672a\u6536\u76d8\u6570\u636e\uff1b\u5426\u5219\uff0c\u8fd4\u56de\u7684\u8bb0\u5f55\u5c06\u622a\u6b62\u5230 tf.floor(end, frame_type) \u3002 Parameters: Name Type Description Default codes List[str] \u80a1\u7968/\u6307\u6570\u4ee3\u7801\u5217\u8868 required frame_type FrameType \u5e27\u7c7b\u578b required start Union[datetime.date, datetime.datetime] \u8d77\u59cb\u65f6\u95f4 required end Union[datetime.date, datetime.datetime] \u7ed3\u675f\u65f6\u95f4\u3002\u5982\u679c\u672a\u6307\u660e\uff0c\u5219\u53d6\u5f53\u524d\u65f6\u95f4\u3002 required fq bool \u662f\u5426\u8fdb\u884c\u590d\u6743\uff0c\u5982\u679c\u662f\uff0c\u5219\u8fdb\u884c\u524d\u590d\u6743\u3002Defaults to True. True Returns: Type Description Generator[Dict[str, BarsArray], None, None] \u8fed\u4ee3\u5668\uff0c\u6bcf\u6b21\u8fd4\u56de\u4e00\u4e2a\u5b57\u5178\uff0c\u5176\u4e2dkey\u4e3a\u4ee3\u7801\uff0cvalue\u4e3a\u884c\u60c5\u6570\u636e Source code in omicron/models/stock.py @classmethod async def batch_get_min_level_bars_in_range ( cls , codes : List [ str ], frame_type : FrameType , start : Frame , end : Frame , fq : bool = True , ) -> Generator [ Dict [ str , BarsArray ], None , None ]: \"\"\"\u83b7\u53d6\u591a\u652f\u80a1\u7968\uff08\u6307\u6570\uff09\u5728[start, end)\u65f6\u95f4\u6bb5\u5185\u7684\u884c\u60c5\u6570\u636e \u5982\u679c\u8981\u83b7\u53d6\u7684\u884c\u60c5\u6570\u636e\u662f\u5206\u949f\u7ea7\u522b\uff08\u53731m, 5m, 15m, 30m\u548c60m)\uff0c\u4f7f\u7528\u672c\u63a5\u53e3\u3002 \u505c\u724c\u6570\u636e\u5904\u7406\u8bf7\u89c1[get_bars][omicron.models.stock.Stock.get_bars]\u3002 \u672c\u51fd\u6570\u8fd4\u56de\u4e00\u4e2a\u8fed\u4ee3\u5668\uff0c\u4f7f\u7528\u65b9\u6cd5\u793a\u4f8b\uff1a ``` async for code, bars in Stock.batch_get_min_level_bars_in_range(...): print(code, bars) ``` \u5982\u679c`end`\u4e0d\u5728`frame_type`\u6240\u5c5e\u7684\u8fb9\u754c\u70b9\u4e0a\uff0c\u90a3\u4e48\uff0c\u5982\u679c`end`\u5927\u4e8e\u7b49\u4e8e\u5f53\u524d\u7f13\u5b58\u672a\u6536\u76d8\u6570\u636e\u65f6\u95f4\uff0c\u5219\u5c06\u5305\u542b\u672a\u6536\u76d8\u6570\u636e\uff1b\u5426\u5219\uff0c\u8fd4\u56de\u7684\u8bb0\u5f55\u5c06\u622a\u6b62\u5230`tf.floor(end, frame_type)`\u3002 Args: codes: \u80a1\u7968/\u6307\u6570\u4ee3\u7801\u5217\u8868 frame_type: \u5e27\u7c7b\u578b start: \u8d77\u59cb\u65f6\u95f4 end: \u7ed3\u675f\u65f6\u95f4\u3002\u5982\u679c\u672a\u6307\u660e\uff0c\u5219\u53d6\u5f53\u524d\u65f6\u95f4\u3002 fq: \u662f\u5426\u8fdb\u884c\u590d\u6743\uff0c\u5982\u679c\u662f\uff0c\u5219\u8fdb\u884c\u524d\u590d\u6743\u3002Defaults to True. 
Returns: Generator[Dict[str, BarsArray], None, None]: \u8fed\u4ee3\u5668\uff0c\u6bcf\u6b21\u8fd4\u56de\u4e00\u4e2a\u5b57\u5178\uff0c\u5176\u4e2dkey\u4e3a\u4ee3\u7801\uff0cvalue\u4e3a\u884c\u60c5\u6570\u636e \"\"\" closed_end = tf . floor ( end , frame_type ) n = tf . count_frames ( start , closed_end , frame_type ) max_query_size = min ( cfg . influxdb . max_query_size , INFLUXDB_MAX_QUERY_SIZE ) batch_size = max ( 1 , max_query_size // n ) ff = tf . first_min_frame ( datetime . datetime . now (), frame_type ) for i in range ( 0 , len ( codes ), batch_size ): batch_codes = codes [ i : i + batch_size ] if end < ff : part1 = await cls . _batch_get_persisted_bars_in_range ( batch_codes , frame_type , start , end ) part2 = pd . DataFrame ([], columns = bars_dtype_with_code . names ) elif start >= ff : part1 = pd . DataFrame ([], columns = bars_dtype_with_code . names ) n = tf . count_frames ( start , closed_end , frame_type ) + 1 cached = await cls . _batch_get_cached_bars_n ( frame_type , n , end , batch_codes ) cached = cached [ cached [ \"frame\" ] >= start ] part2 = pd . DataFrame ( cached , columns = bars_dtype_with_code . names ) else : part1 = await cls . _batch_get_persisted_bars_in_range ( batch_codes , frame_type , start , ff ) n = tf . count_frames ( start , closed_end , frame_type ) + 1 cached = await cls . _batch_get_cached_bars_n ( frame_type , n , end , batch_codes ) part2 = pd . DataFrame ( cached , columns = bars_dtype_with_code . names ) df = pd . concat ([ part1 , part2 ]) for code in batch_codes : filtered = df [ df [ \"code\" ] == code ][ bars_cols ] bars = filtered . to_records ( index = False ) . astype ( bars_dtype ) if fq : bars = cls . qfq ( bars ) yield code , bars","title":"batch_get_min_level_bars_in_range()"},{"location":"api/stock/#omicron.models.stock.Stock.cache_bars","text":"\u5c06\u5f53\u671f\u5df2\u6536\u76d8\u7684\u884c\u60c5\u6570\u636e\u7f13\u5b58 Note \u5f53\u524d\u53ea\u7f13\u5b581\u5206\u949f\u6570\u636e\u3002\u5176\u5b83\u5206\u949f\u6570\u636e\uff0c\u90fd\u5728\u8c03\u7528\u65f6\uff0c\u901a\u8fc7resample\u4e34\u65f6\u5408\u6210\u3002 \u884c\u60c5\u6570\u636e\u7f13\u5b58\u5728\u4ee5 bars:{frame_type.value}:{code} \u4e3akey, {frame}\u4e3afield\u7684hashmap\u4e2d\u3002 Parameters: Name Type Description Default code str the full qualified code of a security or index required frame_type FrameType frame type of the bars required bars numpy.ndarray[Any, numpy.dtype[dtype([('frame', ' int : \"\"\"\u83b7\u53d6\u4e0a\u5e02\u4ee5\u6765\u7ecf\u8fc7\u4e86\u591a\u5c11\u4e2a\u4ea4\u6613\u65e5 \u7531\u4e8e\u53d7\u4ea4\u6613\u65e5\u5386\u9650\u5236\uff082005\u5e741\u67084\u65e5\u4e4b\u524d\u7684\u4ea4\u6613\u65e5\u5386\u6ca1\u6709\uff09\uff0c\u5bf9\u4e8e\u5728\u4e4b\u524d\u4e0a\u5e02\u7684\u54c1\u79cd\uff0c\u90fd\u8fd4\u56de\u4ece2005\u5e741\u67084\u65e5\u8d77\u7684\u65e5\u671f\u3002 Returns: int: [description] \"\"\" epoch_start = arrow . get ( \"2005-01-04\" ) . date () ipo_day = self . ipo_date if self . ipo_date > epoch_start else epoch_start return tf . count_day_frames ( ipo_day , arrow . now () . 
date ())","title":"days_since_ipo()"},{"location":"api/stock/#omicron.models.stock.Stock.format_code","text":"\u65b0\u4e09\u677f\u548c\u5317\u4ea4\u6240\u7684\u80a1\u7968, \u6682\u4e0d\u652f\u6301, \u9ed8\u8ba4\u8fd4\u56deNone \u4e0a\u8bc1A\u80a1: 600\u3001601\u3001603\u3001605 \u6df1\u8bc1A\u80a1: 000\u3001001 \u4e2d\u5c0f\u677f: 002\u3001003 \u521b\u4e1a\u677f: 300/301 \u79d1\u521b\u677f: 688 \u65b0\u4e09\u677f: 82\u300183\u300187\u300188\u3001430\u3001420\u3001400 \u5317\u4ea4\u6240: 43\u300183\u300187\u300188 Source code in omicron/models/stock.py @staticmethod def format_code ( code ) -> str : \"\"\"\u65b0\u4e09\u677f\u548c\u5317\u4ea4\u6240\u7684\u80a1\u7968, \u6682\u4e0d\u652f\u6301, \u9ed8\u8ba4\u8fd4\u56deNone \u4e0a\u8bc1A\u80a1: 600\u3001601\u3001603\u3001605 \u6df1\u8bc1A\u80a1: 000\u3001001 \u4e2d\u5c0f\u677f: 002\u3001003 \u521b\u4e1a\u677f: 300/301 \u79d1\u521b\u677f: 688 \u65b0\u4e09\u677f: 82\u300183\u300187\u300188\u3001430\u3001420\u3001400 \u5317\u4ea4\u6240: 43\u300183\u300187\u300188 \"\"\" if not code or len ( code ) != 6 : return None prefix = code [ 0 ] if prefix in ( \"0\" , \"3\" ): return f \" { code } .XSHE\" elif prefix == \"6\" : return f \" { code } .XSHG\" else : return None","title":"format_code()"},{"location":"api/stock/#omicron.models.stock.Stock.fuzzy_match","text":"\u5bf9\u80a1\u7968/\u6307\u6570\u8fdb\u884c\u6a21\u7cca\u5339\u914d\u67e5\u627e query\u53ef\u4ee5\u662f\u80a1\u7968/\u6307\u6570\u4ee3\u7801\uff0c\u4e5f\u53ef\u4ee5\u662f\u5b57\u6bcd\uff08\u6309name\u67e5\u627e\uff09\uff0c\u4e5f\u53ef\u4ee5\u662f\u6c49\u5b57\uff08\u6309\u663e\u793a\u540d\u67e5\u627e\uff09 Parameters: Name Type Description Default query str \u67e5\u8be2\u5b57\u7b26\u4e32 required Returns: Type Description Dict[str, Tuple] \u67e5\u8be2\u7ed3\u679c\uff0c\u5176\u4e2dTuple\u4e3a(code, display_name, name, start, end, type) Source code in omicron/models/stock.py @classmethod def fuzzy_match ( cls , query : str ) -> Dict [ str , Tuple ]: \"\"\"\u5bf9\u80a1\u7968/\u6307\u6570\u8fdb\u884c\u6a21\u7cca\u5339\u914d\u67e5\u627e query\u53ef\u4ee5\u662f\u80a1\u7968/\u6307\u6570\u4ee3\u7801\uff0c\u4e5f\u53ef\u4ee5\u662f\u5b57\u6bcd\uff08\u6309name\u67e5\u627e\uff09\uff0c\u4e5f\u53ef\u4ee5\u662f\u6c49\u5b57\uff08\u6309\u663e\u793a\u540d\u67e5\u627e\uff09 Args: query (str): \u67e5\u8be2\u5b57\u7b26\u4e32 Returns: Dict[str, Tuple]: \u67e5\u8be2\u7ed3\u679c\uff0c\u5176\u4e2dTuple\u4e3a(code, display_name, name, start, end, type) \"\"\" query = query . upper () if re . match ( r \"\\d+\" , query ): return { sec [ \"code\" ]: sec . tolist () for sec in cls . _stocks if sec [ \"code\" ] . startswith ( query ) } elif re . match ( r \"[A-Z]+\" , query ): return { sec [ \"code\" ]: sec . tolist () for sec in cls . _stocks if sec [ \"name\" ] . startswith ( query ) } else : return { sec [ \"code\" ]: sec . tolist () for sec in cls . _stocks if sec [ \"alias\" ] . 
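A sketch of `fuzzy_match` as documented above. It requires the securities list to have been loaded (for example via `omicron.init()`, an assumption not stated on this page), and the query strings are hypothetical.

```
# Match by code prefix, by name prefix (letters), or by display-name substring (Chinese).
by_code  = Stock.fuzzy_match("6000")
by_name  = Stock.fuzzy_match("PAYH")
by_alias = Stock.fuzzy_match("平安")

# Each result maps code -> (code, display_name, name, start, end, type).
for code, info in by_code.items():
    print(code, info)
```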
find ( query ) != - 1 }","title":"fuzzy_match()"},{"location":"api/stock/#omicron.models.stock.Stock.get_bars","text":"\u83b7\u53d6\u5230 end \u4e3a\u6b62\u7684 n \u4e2a\u884c\u60c5\u6570\u636e\u3002 \u8fd4\u56de\u7684\u6570\u636e\u662f\u6309\u7167\u65f6\u95f4\u987a\u5e8f\u9012\u589e\u6392\u5e8f\u7684\u3002\u5728\u9047\u5230\u505c\u724c\u7684\u60c5\u51b5\u65f6\uff0c\u8be5\u65f6\u6bb5\u6570\u636e\u5c06\u88ab\u8df3\u8fc7\uff0c\u56e0\u6b64\u8fd4\u56de\u7684\u8bb0\u5f55\u53ef\u80fd\u4e0d\u662f\u4ea4\u6613\u65e5\u8fde\u7eed\u7684\uff0c\u5e76\u4e14\u53ef\u80fd\u4e0d\u8db3 n \u4e2a\u3002 \u5982\u679c\u7cfb\u7edf\u5f53\u524d\u6ca1\u6709\u5230\u6307\u5b9a\u65f6\u95f4 end \u7684\u6570\u636e\uff0c\u5c06\u5c3d\u6700\u5927\u52aa\u529b\u8fd4\u56de\u6570\u636e\u3002\u8c03\u7528\u8005\u53ef\u4ee5\u901a\u8fc7\u5224\u65ad\u6700\u540e\u4e00\u6761\u6570\u636e\u7684\u65f6\u95f4\u662f\u5426\u7b49\u4e8e end \u6765\u5224\u65ad\u662f\u5426\u83b7\u53d6\u5230\u4e86\u5168\u90e8\u6570\u636e\u3002 Parameters: Name Type Description Default code str \u8bc1\u5238\u4ee3\u7801 required n int \u8bb0\u5f55\u6570 required frame_type FrameType \u5e27\u7c7b\u578b required end Union[datetime.date, datetime.datetime] \u622a\u6b62\u65f6\u95f4,\u5982\u679c\u672a\u6307\u660e\uff0c\u5219\u53d6\u5f53\u524d\u65f6\u95f4 None fq \u662f\u5426\u5bf9\u8fd4\u56de\u8bb0\u5f55\u8fdb\u884c\u590d\u6743\u3002\u5982\u679c\u4e3a True \u7684\u8bdd\uff0c\u5219\u8fdb\u884c\u524d\u590d\u6743\u3002Defaults to True. True unclosed \u662f\u5426\u5305\u542b\u6700\u65b0\u672a\u6536\u76d8\u7684\u6570\u636e\uff1f Defaults to True. True Returns: Type Description numpy.ndarray[Any, numpy.dtype[dtype([('frame', ' BarsArray : \"\"\"\u83b7\u53d6\u5230`end`\u4e3a\u6b62\u7684`n`\u4e2a\u884c\u60c5\u6570\u636e\u3002 \u8fd4\u56de\u7684\u6570\u636e\u662f\u6309\u7167\u65f6\u95f4\u987a\u5e8f\u9012\u589e\u6392\u5e8f\u7684\u3002\u5728\u9047\u5230\u505c\u724c\u7684\u60c5\u51b5\u65f6\uff0c\u8be5\u65f6\u6bb5\u6570\u636e\u5c06\u88ab\u8df3\u8fc7\uff0c\u56e0\u6b64\u8fd4\u56de\u7684\u8bb0\u5f55\u53ef\u80fd\u4e0d\u662f\u4ea4\u6613\u65e5\u8fde\u7eed\u7684\uff0c\u5e76\u4e14\u53ef\u80fd\u4e0d\u8db3`n`\u4e2a\u3002 \u5982\u679c\u7cfb\u7edf\u5f53\u524d\u6ca1\u6709\u5230\u6307\u5b9a\u65f6\u95f4`end`\u7684\u6570\u636e\uff0c\u5c06\u5c3d\u6700\u5927\u52aa\u529b\u8fd4\u56de\u6570\u636e\u3002\u8c03\u7528\u8005\u53ef\u4ee5\u901a\u8fc7\u5224\u65ad\u6700\u540e\u4e00\u6761\u6570\u636e\u7684\u65f6\u95f4\u662f\u5426\u7b49\u4e8e`end`\u6765\u5224\u65ad\u662f\u5426\u83b7\u53d6\u5230\u4e86\u5168\u90e8\u6570\u636e\u3002 Args: code: \u8bc1\u5238\u4ee3\u7801 n: \u8bb0\u5f55\u6570 frame_type: \u5e27\u7c7b\u578b end: \u622a\u6b62\u65f6\u95f4,\u5982\u679c\u672a\u6307\u660e\uff0c\u5219\u53d6\u5f53\u524d\u65f6\u95f4 fq: \u662f\u5426\u5bf9\u8fd4\u56de\u8bb0\u5f55\u8fdb\u884c\u590d\u6743\u3002\u5982\u679c\u4e3a`True`\u7684\u8bdd\uff0c\u5219\u8fdb\u884c\u524d\u590d\u6743\u3002Defaults to True. unclosed: \u662f\u5426\u5305\u542b\u6700\u65b0\u672a\u6536\u76d8\u7684\u6570\u636e\uff1f Defaults to True. Returns: \u8fd4\u56dedtype\u4e3a`coretypes.bars_dtype`\u7684\u4e00\u7ef4numpy\u6570\u7ec4\u3002 \"\"\" now = datetime . datetime . now () try : cached = np . array ([], dtype = bars_dtype ) if frame_type in tf . day_level_frames : if end is None : end = now . date () elif type ( end ) == datetime . datetime : end = end . date () n0 = n if unclosed : cached = await cls . _get_cached_bars_n ( code , 1 , frame_type ) if cached . 
size > 0 : # \u5982\u679c\u7f13\u5b58\u7684\u672a\u6536\u76d8\u65e5\u671f > end\uff0c\u5219\u8be5\u7f13\u5b58\u4e0d\u662f\u9700\u8981\u7684 if cached [ 0 ][ \"frame\" ] . item () . date () > end : cached = np . array ([], dtype = bars_dtype ) else : n0 = n - 1 else : end = end or now closed_frame = tf . floor ( end , frame_type ) # fetch one more bar, in case we should discard unclosed bar cached = await cls . _get_cached_bars_n ( code , n + 1 , frame_type , end ) if not unclosed : cached = cached [ cached [ \"frame\" ] <= closed_frame ] # n bars we need fetch from persisted db n0 = n - cached . size if n0 > 0 : if cached . size > 0 : end0 = cached [ 0 ][ \"frame\" ] . item () else : end0 = end bars = await cls . _get_persisted_bars_n ( code , frame_type , n0 , end0 ) merged = np . concatenate (( bars , cached )) bars = merged [ - n :] else : bars = cached [ - n :] if fq : bars = cls . qfq ( bars ) return bars except Exception as e : logger . exception ( e ) logger . warning ( \"failed to get bars for %s , %s , %s , %s \" , code , n , frame_type , end ) raise","title":"get_bars()"},{"location":"api/stock/#omicron.models.stock.Stock.get_bars_in_range","text":"\u83b7\u53d6\u6307\u5b9a\u8bc1\u5238\uff08 code \uff09\u5728[ start , end ]\u671f\u95f4\u5e27\u7c7b\u578b\u4e3a frame_type \u7684\u884c\u60c5\u6570\u636e\u3002 Parameters: Name Type Description Default code \u8bc1\u5238\u4ee3\u7801 required frame_type \u884c\u60c5\u6570\u636e\u7684\u5e27\u7c7b\u578b required start \u8d77\u59cb\u65f6\u95f4 required end \u7ed3\u675f\u65f6\u95f4,\u5982\u679c\u4e3aNone\uff0c\u5219\u8868\u660e\u53d6\u5230\u5f53\u524d\u65f6\u95f4\u3002 None fq \u662f\u5426\u5bf9\u884c\u60c5\u6570\u636e\u6267\u884c\u524d\u590d\u6743\u64cd\u4f5c True unclosed \u662f\u5426\u5305\u542b\u672a\u6536\u76d8\u7684\u6570\u636e True Source code in omicron/models/stock.py @classmethod async def get_bars_in_range ( cls , code : str , frame_type : FrameType , start : Frame , end : Frame = None , fq = True , unclosed = True , ) -> BarsArray : \"\"\"\u83b7\u53d6\u6307\u5b9a\u8bc1\u5238\uff08`code`\uff09\u5728[`start`, `end`]\u671f\u95f4\u5e27\u7c7b\u578b\u4e3a`frame_type`\u7684\u884c\u60c5\u6570\u636e\u3002 Args: code : \u8bc1\u5238\u4ee3\u7801 frame_type : \u884c\u60c5\u6570\u636e\u7684\u5e27\u7c7b\u578b start : \u8d77\u59cb\u65f6\u95f4 end : \u7ed3\u675f\u65f6\u95f4,\u5982\u679c\u4e3aNone\uff0c\u5219\u8868\u660e\u53d6\u5230\u5f53\u524d\u65f6\u95f4\u3002 fq : \u662f\u5426\u5bf9\u884c\u60c5\u6570\u636e\u6267\u884c\u524d\u590d\u6743\u64cd\u4f5c unclosed : \u662f\u5426\u5305\u542b\u672a\u6536\u76d8\u7684\u6570\u636e \"\"\" now = datetime . datetime . now () if frame_type in tf . day_level_frames : end = end or now . date () if unclosed and tf . day_shift ( end , 0 ) == now . date (): part2 = await cls . _get_cached_bars_n ( code , 1 , frame_type ) else : part2 = np . array ([], dtype = bars_dtype ) # get rest from persisted part1 = await cls . _get_persisted_bars_in_range ( code , frame_type , start , end ) bars = np . concatenate (( part1 , part2 )) else : end = end or now closed_end = tf . floor ( end , frame_type ) ff_min1 = tf . first_min_frame ( now , FrameType . MIN1 ) if tf . day_shift ( end , 0 ) < now . date () or end < ff_min1 : part1 = await cls . _get_persisted_bars_in_range ( code , frame_type , start , end ) part2 = np . array ([], dtype = bars_dtype ) elif start >= ff_min1 : # all in cache part1 = np . array ([], dtype = bars_dtype ) n = tf . count_frames ( start , closed_end , frame_type ) + 1 part2 = await cls . 
_get_cached_bars_n ( code , n , frame_type , end ) part2 = part2 [ part2 [ \"frame\" ] >= start ] else : # in both cache and persisted ff = tf . first_min_frame ( now , frame_type ) part1 = await cls . _get_persisted_bars_in_range ( code , frame_type , start , ff ) n = tf . count_frames ( ff , closed_end , frame_type ) + 1 part2 = await cls . _get_cached_bars_n ( code , n , frame_type , end ) if not unclosed : part2 = part2 [ part2 [ \"frame\" ] <= closed_end ] bars = np . concatenate (( part1 , part2 )) if fq : return cls . qfq ( bars ) else : return bars","title":"get_bars_in_range()"},{"location":"api/stock/#omicron.models.stock.Stock.get_latest_price","text":"\u83b7\u53d6\u591a\u652f\u80a1\u7968\u7684\u6700\u65b0\u4ef7\u683c\uff08\u4ea4\u6613\u65e5\u5f53\u5929\uff09\uff0c\u6682\u4e0d\u5305\u62ec\u6307\u6570 \u4ef7\u683c\u6570\u636e\u6bcf5\u79d2\u66f4\u65b0\u4e00\u6b21\uff0c\u63a5\u53d7\u591a\u53ea\u80a1\u7968\u67e5\u8be2\uff0c\u8fd4\u56de\u6700\u540e\u7f13\u5b58\u7684\u4ef7\u683c Parameters: Name Type Description Default codes Iterable[str] \u4ee3\u7801\u5217\u8868 required Returns: Type Description List[str] \u8fd4\u56de\u4e00\u4e2aList\uff0c\u4ef7\u683c\u662f\u5b57\u7b26\u5f62\u5f0f\u7684\u6d6e\u70b9\u6570\u3002 Source code in omicron/models/stock.py @classmethod async def get_latest_price ( cls , codes : Iterable [ str ]) -> List [ str ]: \"\"\"\u83b7\u53d6\u591a\u652f\u80a1\u7968\u7684\u6700\u65b0\u4ef7\u683c\uff08\u4ea4\u6613\u65e5\u5f53\u5929\uff09\uff0c\u6682\u4e0d\u5305\u62ec\u6307\u6570 \u4ef7\u683c\u6570\u636e\u6bcf5\u79d2\u66f4\u65b0\u4e00\u6b21\uff0c\u63a5\u53d7\u591a\u53ea\u80a1\u7968\u67e5\u8be2\uff0c\u8fd4\u56de\u6700\u540e\u7f13\u5b58\u7684\u4ef7\u683c Args: codes: \u4ee3\u7801\u5217\u8868 Returns: \u8fd4\u56de\u4e00\u4e2aList\uff0c\u4ef7\u683c\u662f\u5b57\u7b26\u5f62\u5f0f\u7684\u6d6e\u70b9\u6570\u3002 \"\"\" if not codes : return [] _raw_code_list = [] for code_str in codes : code , _ = code_str . split ( \".\" ) _raw_code_list . append ( code ) _converted_data = [] raw_data = await cache . feature . hmget ( TRADE_LATEST_PRICE , * _raw_code_list ) for _data in raw_data : if _data is None : _converted_data . append ( _data ) else : _converted_data . 
append ( float ( _data )) return _converted_data","title":"get_latest_price()"},{"location":"api/stock/#omicron.models.stock.Stock.get_trade_price_limits","text":"\u4eceinfluxdb\u548ccache\u4e2d\u83b7\u53d6\u4e2a\u80a1\u5728[begin, end]\u4e4b\u95f4\u7684\u6da8\u8dcc\u505c\u4ef7\u3002 \u6da8\u8dcc\u505c\u4ef7\u53ea\u6709\u65e5\u7ebf\u6570\u636e\u624d\u6709\uff0c\u56e0\u6b64\uff0cFrameType\u56fa\u5b9a\u4e3aFrameType.DAY\uff0c \u5f53\u5929\u7684\u6570\u636e\u5b58\u653e\u4e8eredis\uff0c\u5982\u679c\u67e5\u8be2\u65e5\u671f\u5305\u542b\u5f53\u5929\uff08\u4ea4\u6613\u65e5\uff09\uff0c\u4ececache\u4e2d\u8bfb\u53d6\u5e76\u8ffd\u52a0\u5230\u7ed3\u679c\u4e2d Parameters: Name Type Description Default code \u4e2a\u80a1\u4ee3\u7801 required begin \u5f00\u59cb\u65e5\u671f required end \u7ed3\u675f\u65e5\u671f required Returns: Type Description numpy.ndarray[Any, numpy.dtype[dtype([('frame', ' BarsArray : \"\"\"\u4eceinfluxdb\u548ccache\u4e2d\u83b7\u53d6\u4e2a\u80a1\u5728[begin, end]\u4e4b\u95f4\u7684\u6da8\u8dcc\u505c\u4ef7\u3002 \u6da8\u8dcc\u505c\u4ef7\u53ea\u6709\u65e5\u7ebf\u6570\u636e\u624d\u6709\uff0c\u56e0\u6b64\uff0cFrameType\u56fa\u5b9a\u4e3aFrameType.DAY\uff0c \u5f53\u5929\u7684\u6570\u636e\u5b58\u653e\u4e8eredis\uff0c\u5982\u679c\u67e5\u8be2\u65e5\u671f\u5305\u542b\u5f53\u5929\uff08\u4ea4\u6613\u65e5\uff09\uff0c\u4ececache\u4e2d\u8bfb\u53d6\u5e76\u8ffd\u52a0\u5230\u7ed3\u679c\u4e2d Args: code : \u4e2a\u80a1\u4ee3\u7801 begin : \u5f00\u59cb\u65e5\u671f end : \u7ed3\u675f\u65e5\u671f Returns: dtype\u4e3a[('frame', 'O'), ('high_limit', 'f4'), ('low_limit', 'f4')]\u7684numpy\u6570\u7ec4 \"\"\" cols = [ \"_time\" , \"high_limit\" , \"low_limit\" ] dtype = [( \"frame\" , \"O\" ), ( \"high_limit\" , \"f4\" ), ( \"low_limit\" , \"f4\" )] if isinstance ( begin , datetime . datetime ): begin = begin . date () # \u5f3a\u5236\u8f6c\u6362\u4e3adate if isinstance ( end , datetime . datetime ): end = end . date () # \u5f3a\u5236\u8f6c\u6362\u4e3adate data_in_cache = await cls . _get_price_limit_in_cache ( code , begin , end ) client = get_influx_client () measurement = cls . _measurement_name ( FrameType . DAY ) flux = ( Flux () . bucket ( client . _bucket ) . measurement ( measurement ) . range ( begin , end ) . tags ({ \"code\" : code }) . fields ( cols ) . sort ( \"_time\" ) ) ds = NumpyDeserializer ( dtype , use_cols = cols , converters = { \"_time\" : lambda x : ciso8601 . parse_datetime ( x ) . date ()}, # since we ask parse date in convertors, so we have to disable parse_date parse_date = None , ) result = await client . query ( flux , ds ) if data_in_cache : result = np . concatenate ([ result , data_in_cache ]) return result","title":"get_trade_price_limits()"},{"location":"api/stock/#omicron.models.stock.Stock.persist_bars","text":"\u5c06\u884c\u60c5\u6570\u636e\u6301\u4e45\u5316 \u5982\u679c bars \u7c7b\u578b\u4e3aDict,\u5219key\u4e3a code \uff0cvalue\u4e3a bars \u3002\u5982\u679c\u5176\u7c7b\u578b\u4e3aBarsArray\u6216\u8005pd.DataFrame\uff0c\u5219 bars \u5404\u5217\u5b57\u6bb5\u5e94\u8be5\u4e3a coretypes.bars_dtype + (\"code\", \"O\")\u6784\u6210\u3002 Parameters: Name Type Description Default frame_type FrameType the frame type of the bars required bars Union[Dict[str, numpy.ndarray[Any, numpy.dtype[dtype([('frame', ' BarsArray : \"\"\"\u5bf9\u884c\u60c5\u6570\u636e\u6267\u884c\u524d\u590d\u6743\u64cd\u4f5c\"\"\" # todo: \u8fd9\u91cc\u53ef\u4ee5\u4f18\u5316 if bars . 
size == 0 : return bars last = bars [ - 1 ][ \"factor\" ] for field in [ \"open\" , \"high\" , \"low\" , \"close\" , \"volume\" ]: bars [ field ] = bars [ field ] * ( bars [ \"factor\" ] / last ) return bars","title":"qfq()"},{"location":"api/stock/#omicron.models.stock.Stock.resample","text":"\u5c06\u539f\u6765\u4e3a from_frame \u7684\u884c\u60c5\u6570\u636e\u8f6c\u6362\u4e3a to_frame \u7684\u884c\u60c5\u6570\u636e \u5982\u679c to_frame \u4e3a\u65e5\u7ebf\u6216\u8005\u5206\u949f\u7ea7\u522b\u7ebf\uff0c\u5219 from_frame \u5fc5\u987b\u4e3a\u5206\u949f\u7ebf\uff1b\u5982\u679c to_frame \u4e3a\u5468\u4ee5\u4e0a\u7ea7\u522b\u7ebf\uff0c\u5219 from_frame \u5fc5\u987b\u4e3a\u65e5\u7ebf\u3002\u5176\u5b83\u7ea7\u522b\u4e4b\u95f4\u7684\u8f6c\u6362\u4e0d\u652f\u6301\u3002 \u5982\u679c from_frame \u4e3a1\u5206\u949f\u7ebf\uff0c\u5219\u5fc5\u987b\u4ece9\uff1a31\u8d77\u3002 Parameters: Name Type Description Default bars BarsArray \u884c\u60c5\u6570\u636e required from_frame FrameType \u8f6c\u6362\u524d\u7684FrameType required to_frame FrameType \u8f6c\u6362\u540e\u7684FrameType required Returns: Type Description BarsArray \u8f6c\u6362\u540e\u7684\u884c\u60c5\u6570\u636e Source code in omicron/models/stock.py @classmethod def resample ( cls , bars : BarsArray , from_frame : FrameType , to_frame : FrameType ) -> BarsArray : \"\"\"\u5c06\u539f\u6765\u4e3a`from_frame`\u7684\u884c\u60c5\u6570\u636e\u8f6c\u6362\u4e3a`to_frame`\u7684\u884c\u60c5\u6570\u636e \u5982\u679c`to_frame`\u4e3a\u65e5\u7ebf\u6216\u8005\u5206\u949f\u7ea7\u522b\u7ebf\uff0c\u5219`from_frame`\u5fc5\u987b\u4e3a\u5206\u949f\u7ebf\uff1b\u5982\u679c`to_frame`\u4e3a\u5468\u4ee5\u4e0a\u7ea7\u522b\u7ebf\uff0c\u5219`from_frame`\u5fc5\u987b\u4e3a\u65e5\u7ebf\u3002\u5176\u5b83\u7ea7\u522b\u4e4b\u95f4\u7684\u8f6c\u6362\u4e0d\u652f\u6301\u3002 \u5982\u679c`from_frame`\u4e3a1\u5206\u949f\u7ebf\uff0c\u5219\u5fc5\u987b\u4ece9\uff1a31\u8d77\u3002 Args: bars (BarsArray): \u884c\u60c5\u6570\u636e from_frame (FrameType): \u8f6c\u6362\u524d\u7684FrameType to_frame (FrameType): \u8f6c\u6362\u540e\u7684FrameType Returns: BarsArray: \u8f6c\u6362\u540e\u7684\u884c\u60c5\u6570\u636e \"\"\" if from_frame == FrameType . MIN1 : return cls . _resample_from_min1 ( bars , to_frame ) elif from_frame == FrameType . DAY : # pragma: no cover return cls . _resample_from_day ( bars , to_frame ) else : # pragma: no cover raise TypeError ( f \"unsupported from_frame: { from_frame } \" )","title":"resample()"},{"location":"api/stock/#omicron.models.stock.Stock.reset_cache","text":"\u6e05\u9664\u7f13\u5b58\u7684\u884c\u60c5\u6570\u636e Source code in omicron/models/stock.py @classmethod async def reset_cache ( cls ): \"\"\"\u6e05\u9664\u7f13\u5b58\u7684\u884c\u60c5\u6570\u636e\"\"\" try : for ft in itertools . chain ( tf . minute_level_frames , tf . day_level_frames ): keys = await cache . security . keys ( f \"bars: { ft . value } :*\" ) if keys : await cache . security . delete ( * keys ) finally : cls . 
_is_cache_empty = True","title":"reset_cache()"},{"location":"api/stock/#omicron.models.stock.Stock.save_trade_price_limits","text":"\u4fdd\u5b58\u6da8\u8dcc\u505c\u4ef7 Parameters: Name Type Description Default price_limits numpy.ndarray[Any, numpy.dtype[dtype([('frame', 'O'), ('code', 'O'), ('high_limit', ' Tuple [ List [ bool ]]: \"\"\"\u83b7\u53d6\u4e2a\u80a1\u5728[start, end]\u4e4b\u95f4\u7684\u6da8\u8dcc\u505c\u6807\u5fd7 !!!Note \u672c\u51fd\u6570\u8fd4\u56de\u7684\u5e8f\u5217\u5728\u80a1\u7968\u6709\u505c\u724c\u7684\u60c5\u51b5\u4e0b\uff0c\u5c06\u4e0d\u80fd\u4e0e[start, end]\u4e00\u4e00\u5bf9\u5e94\u3002 Args: code: \u4e2a\u80a1\u4ee3\u7801 start: \u5f00\u59cb\u65e5\u671f end: \u7ed3\u675f\u65e5\u671f Returns: \u6da8\u8dcc\u505c\u6807\u5fd7\u5217\u8868(buy, sell) \"\"\" cols = [ \"_time\" , \"close\" , \"high_limit\" , \"low_limit\" ] client = get_influx_client () measurement = cls . _measurement_name ( FrameType . DAY ) flux = ( Flux () . bucket ( client . _bucket ) . measurement ( measurement ) . range ( start , end ) . tags ({ \"code\" : code }) . fields ( cols ) . sort ( \"_time\" ) ) dtype = [ ( \"frame\" , \"O\" ), ( \"close\" , \"f4\" ), ( \"high_limit\" , \"f4\" ), ( \"low_limit\" , \"f4\" ), ] ds = NumpyDeserializer ( dtype , use_cols = [ \"_time\" , \"close\" , \"high_limit\" , \"low_limit\" ], converters = { \"_time\" : lambda x : ciso8601 . parse_datetime ( x ) . date ()}, # since we ask parse date in convertors, so we have to disable parse_date parse_date = None , ) result = await client . query ( flux , ds ) if result . size == 0 : return np . array ([], dtype = dtype ) return ( array_price_equal ( result [ \"close\" ], result [ \"high_limit\" ]), array_price_equal ( result [ \"close\" ], result [ \"low_limit\" ]), )","title":"trade_price_limit_flags()"},{"location":"api/stock/#omicron.models.stock.Stock.trade_price_limit_flags_ex","text":"\u83b7\u53d6\u80a1\u7968 code \u5728 [start, end] \u533a\u95f4\u7684\u6da8\u8dcc\u505c\u6807\u5fd7 Note \u5982\u679cend\u4e3a\u5f53\u5929\uff0c\u6ce8\u610f\u5728\u672a\u6536\u76d8\u4e4b\u524d\uff0c\u8fd9\u4e2a\u6da8\u8dcc\u505c\u6807\u5fd7\u90fd\u662f\u4e0d\u7a33\u5b9a\u7684 Parameters: Name Type Description Default code str \u80a1\u7968\u4ee3\u7801 required start date \u8d77\u59cb\u65e5\u671f required end date \u7ed3\u675f\u65e5\u671f required Returns: Type Description Dict[datetime.date, Tuple[bool, bool]] \u4ee5\u65e5\u671f\u4e3akey\uff0c\uff08\u6da8\u505c\uff0c\u8dcc\u505c\uff09\u4e3a\u503c\u7684dict Source code in omicron/models/stock.py @classmethod async def trade_price_limit_flags_ex ( cls , code : str , start : datetime . date , end : datetime . date ) -> Dict [ datetime . date , Tuple [ bool , bool ]]: \"\"\"\u83b7\u53d6\u80a1\u7968`code`\u5728`[start, end]`\u533a\u95f4\u7684\u6da8\u8dcc\u505c\u6807\u5fd7 !!!Note \u5982\u679cend\u4e3a\u5f53\u5929\uff0c\u6ce8\u610f\u5728\u672a\u6536\u76d8\u4e4b\u524d\uff0c\u8fd9\u4e2a\u6da8\u8dcc\u505c\u6807\u5fd7\u90fd\u662f\u4e0d\u7a33\u5b9a\u7684 Args: code: \u80a1\u7968\u4ee3\u7801 start: \u8d77\u59cb\u65e5\u671f end: \u7ed3\u675f\u65e5\u671f Returns: \u4ee5\u65e5\u671f\u4e3akey\uff0c\uff08\u6da8\u505c\uff0c\u8dcc\u505c\uff09\u4e3a\u503c\u7684dict \"\"\" limit_prices = await cls . get_trade_price_limits ( code , start , end ) bars = await Stock . get_bars_in_range ( code , FrameType . DAY , start = start , end = end , fq = False ) close = bars [ \"close\" ] results = {} # aligned = True for i in range ( len ( bars )): if bars [ i ][ \"frame\" ] . item () . 
date () != limit_prices [ i ][ \"frame\" ]: # aligned = False logger . warning ( \"\u6570\u636e\u540c\u6b65\u9519\u8bef\uff0c\u6da8\u8dcc\u505c\u4ef7\u683c\u4e0e\u6536\u76d8\u4ef7\u65f6\u95f4\u4e0d\u4e00\u81f4: %s , %s \" , code , bars [ i ][ \"frame\" ]) break results [ limit_prices [ i ][ \"frame\" ]] = ( price_equal ( limit_prices [ i ][ \"high_limit\" ], close [ i ]), price_equal ( limit_prices [ i ][ \"low_limit\" ], close [ i ]), ) # if not aligned: # bars = bars[i:] # limit_prices = limit_prices[i:] # for frame in bars[\"frame\"]: # frame = frame.item().date() # close = bars[bars[\"frame\"].item().date() == frame][\"close\"].item() # high = limit_prices[limit_prices[\"frame\"] == frame][\"high_limit\"].item() # low = limit_prices[limit_prices[\"frame\"] == frame][\"low_limit\"].item() # results[frame] = ( # price_equal(high, close), # price_equal(low, close) # ) return results","title":"trade_price_limit_flags_ex()"},{"location":"api/strategy/","text":"base \u00b6 BacktestState dataclass \u00b6 BacktestState(start: Union[datetime.date, datetime.datetime], end: Union[datetime.date, datetime.datetime], barss: Union[NoneType, Dict[str, numpy.ndarray[Any, numpy.dtype[dtype([('frame', ' Dict : \"\"\"\u4e70\u5165\u80a1\u7968 Args: sec: \u8bc1\u5238\u4ee3\u7801 price: \u59d4\u4e70\u4ef7\u3002\u5982\u679c\u4e3aNone\uff0c\u5219\u81ea\u52a8\u8f6c\u5e02\u4ef7\u4e70\u5165\u3002 vol: \u59d4\u4e70\u80a1\u6570\u3002\u8bf7\u81ea\u884c\u4fdd\u8bc1\u4e3a100\u7684\u6574\u6570\u3002\u5982\u679c\u4e3aNone, \u5219money\u5fc5\u987b\u4f20\u5165\u3002 money: \u59d4\u4e70\u91d1\u989d\u3002\u5982\u679c\u540c\u65f6\u4f20\u5165\u4e86vol\uff0c\u5219\u6b64\u53c2\u6570\u81ea\u52a8\u5ffd\u7565 order_time: \u4ec5\u5728\u56de\u6d4b\u6a21\u5f0f\u4e0b\u9700\u8981\u63d0\u4f9b\u3002\u5b9e\u76d8\u6a21\u5f0f\u4e0b\uff0c\u6b64\u53c2\u6570\u81ea\u52a8\u88ab\u5ffd\u7565 Returns: \u89c1traderclient\u4e2d\u7684`buy`\u65b9\u6cd5\u3002 \"\"\" logger . info ( \"buy order: %s , %s , %s , %s \" , sec , f \" { price : .2f } \" if price is not None else None , f \" { vol : .0f } \" if vol is not None else None , f \" { money : .0f } \" if money is not None else None , date = order_time , ) if vol is None : if money is None : raise ValueError ( \"parameter `mnoey` must be presented!\" ) return await self . broker . buy_by_money ( sec , money , price , order_time = order_time ) elif price is None : return self . broker . market_buy ( sec , vol , order_time = order_time ) else : return self . broker . buy ( sec , price , vol , order_time = order_time ) async def sell ( self , sec : str , price : Optional [ float ] = None , vol : Optional [ float ] = None , percent : Optional [ float ] = None , order_time : Optional [ datetime . 
datetime ] = None , ) -> Union [ List , Dict ]: \"\"\"\u5356\u51fa\u80a1\u7968 Args: sec: \u8bc1\u5238\u4ee3\u7801 price: \u59d4\u5356\u4ef7\uff0c\u5982\u679c\u672a\u63d0\u4f9b\uff0c\u5219\u8f6c\u4e3a\u5e02\u4ef7\u5355 vol: \u59d4\u5356\u80a1\u6570\u3002\u5982\u679c\u4e3aNone\uff0c\u5219percent\u5fc5\u987b\u4f20\u5165 percent: \u5356\u51fa\u4e00\u5b9a\u6bd4\u4f8b\u7684\u6301\u4ed3\uff0c\u53d6\u503c\u4ecb\u4e8e0\u4e0e1\u4e4b\u95f4\u3002\u5982\u679c\u4e0evol\u540c\u65f6\u63d0\u4f9b\uff0c\u6b64\u53c2\u6570\u5c06\u88ab\u5ffd\u7565\u3002\u8bf7\u81ea\u884c\u4fdd\u8bc1\u6309\u6bd4\u4f8b\u6362\u7b97\u540e\u7684\u5356\u51fa\u6570\u636e\u662f\u7b26\u5408\u8981\u6c42\u7684\uff08\u6bd4\u5982\u4e0d\u4e3a100\u7684\u500d\u6570\uff0c\u4f46\u6709\u4e9b\u60c5\u51b5\u4e0b\u8fd9\u662f\u5141\u8bb8\u7684\uff0c\u6240\u4ee5\u7a0b\u5e8f\u8fd9\u91cc\u65e0\u6cd5\u5e2e\u4f60\u5224\u65ad\uff09 order_time: \u4ec5\u5728\u56de\u6d4b\u6a21\u5f0f\u4e0b\u9700\u8981\u63d0\u4f9b\u3002\u5b9e\u76d8\u6a21\u5f0f\u4e0b\uff0c\u6b64\u53c2\u6570\u81ea\u52a8\u88ab\u5ffd\u7565 Returns: Union[List, Dict]: \u6210\u4ea4\u8fd4\u56de\uff0c\u8be6\u89c1traderclient\u4e2d\u7684`buy`\u65b9\u6cd5\uff0ctrade server\u53ea\u8fd4\u56de\u4e00\u4e2a\u59d4\u6258\u5355\u4fe1\u606f \"\"\" logger . info ( \"sell order: %s , %s , %s , %s \" , sec , f \" { price : .2f } \" if price is not None else None , f \" { vol : .0f } \" if vol is not None else None , f \" { percent : .2% } \" if percent is not None else None , date = order_time , ) if vol is None and percent is None : raise ValueError ( \"either vol or percent must be presented\" ) if vol is None : if price is None : price = await self . broker . _get_market_sell_price ( sec , order_time = order_time ) # there's no market_sell_percent API in traderclient return self . broker . sell_percent ( sec , price , percent , order_time = order_time ) # type: ignore else : if price is None : return self . broker . market_sell ( sec , vol , order_time = order_time ) else : return self . broker . sell ( sec , price , vol , order_time = order_time ) async def filter_paused_stock ( self , buylist : List [ str ], dt : datetime . date ): secs = await Security . select ( dt ) . eval () in_trading = jq . get_price ( secs , fields = [ \"paused\" ], start_date = dt , end_date = dt , skip_paused = True )[ \"code\" ] . to_numpy () return np . 
intersect1d ( buylist , in_trading ) async def predict ( self , frame : Frame , frame_type : FrameType , i : int , barss : Dict [ str , BarsArray ], ** kwargs , ): \"\"\"\u7b56\u7565\u8bc4\u4f30\u51fd\u6570\u3002\u5728\u6b64\u51fd\u6570\u4e2d\u5b9e\u73b0\u4ea4\u6613\u4fe1\u53f7\u68c0\u6d4b\u548c\u5904\u7406\u3002 Args: frame: \u5f53\u524d\u65f6\u95f4\u5e27 frame_type: \u5904\u7406\u7684\u6570\u636e\u4e3b\u5468\u671f i: \u5f53\u524d\u65f6\u95f4\u79bb\u56de\u6d4b\u8d77\u59cb\u7684\u5355\u4f4d\u6570 barss: \u5982\u679c\u8c03\u7528`backtest`\u65f6\u4f20\u5165\u4e86`portfolio`\u53ca`min_bars`\u53c2\u6570\uff0c\u5219`backtest`\u5c06\u4f1a\u5728\u56de\u6d4b\u4e4b\u524d\uff0c\u9884\u53d6\u4ece[start - min_bars * frame_type, end]\u95f4\u7684portfolio\u884c\u60c5\u6570\u636e\uff0c\u5e76\u5728\u6bcf\u6b21\u8c03\u7528`predict`\u65b9\u6cd5\u65f6\uff0c\u901a\u8fc7`barss`\u53c2\u6570\uff0c\u5c06[start - min_bars * frame_type, start + i * frame_type]\u95f4\u7684\u6570\u636e\u4f20\u7ed9`predict`\u65b9\u6cd5\u3002\u4f20\u5165\u7684\u6570\u636e\u5df2\u8fdb\u884c\u524d\u590d\u6743\u3002 Keyword Args: \u5728`backtest`\u65b9\u6cd5\u4e2d\u7684\u4f20\u5165\u7684kwargs\u53c2\u6570\u5c06\u88ab\u900f\u4f20\u5230\u6b64\u65b9\u6cd5\u4e2d\u3002 \"\"\" raise NotImplementedError async def plot_metrics ( self , indicator : Union [ pd . DataFrame , List [ Tuple ], None ] = None ): \"\"\"\u7b56\u7565\u56de\u6d4b\u62a5\u544a Args: indicator: \u56de\u6d4b\u65f6\u4f7f\u7528\u7684\u6307\u6807\u3002\u5982\u679c\u5b58\u5728\uff0c\u5c06\u53e0\u52a0\u5230\u7b56\u7565\u56de\u6d4b\u56fe\u4e0a\u3002\u5b83\u5e94\u8be5\u662f\u4e00\u4e2a\u4ee5\u65e5\u671f\u4e3a\u7d22\u5f15\uff0c\u6307\u6807\u5217\u540d\u4e3a\"value\"\u7684DataFrame \"\"\" if self . bills is None or self . metrics is None : raise ValueError ( \"Please run `start_backtest` first.\" ) if isinstance ( indicator , list ): assert len ( indicator [ 0 ]) == 2 indicator = pd . DataFrame ( indicator , columns = [ \"date\" , \"value\" ]) indicator . set_index ( \"date\" , inplace = True ) if self . _baseline is not None : mg = MetricsGraph ( self . bills , self . metrics , baseline_code = self . _baseline , indicator = indicator , ) else : mg = MetricsGraph ( self . bills , self . metrics , indicator = indicator ) await mg . 
plot () cash property readonly \u00b6 \u8fd4\u56de\u5f53\u524d\u53ef\u7528\u73b0\u91d1 __init__ ( self , url , account = None , token = None , name = None , ver = None , is_backtest = True , start = None , end = None , frame_type = None , baseline = '399300.XSHE' ) special \u00b6 \u6784\u9020\u51fd\u6570 Parameters: Name Type Description Default url str \u5b9e\u76d8/\u56de\u6d4b\u670d\u52a1\u5668\u7684\u5730\u5740\u3002 required start Union[datetime.date, datetime.datetime] \u56de\u6d4b\u8d77\u59cb\u65e5\u671f\u3002\u56de\u6d4b\u6a21\u5f0f\u4e0b\u5fc5\u987b\u4f20\u5165\u3002 None end Union[datetime.date, datetime.datetime] \u56de\u6d4b\u7ed3\u675f\u65e5\u671f\u3002\u56de\u6d4b\u6a21\u5f0f\u4e0b\u5fc5\u987b\u4f20\u5165\u3002 None account Optional[str] \u5b9e\u76d8/\u56de\u6d4b\u8d26\u53f7\u3002\u5b9e\u76d8\u6a21\u5f0f\u4e0b\u5fc5\u987b\u4f20\u5165\u3002\u5728\u56de\u6d4b\u6a21\u5f0f\u4e0b\uff0c\u5982\u679c\u672a\u4f20\u5165\uff0c\u5c06\u4ee5\u7b56\u7565\u540d+\u968f\u673a\u5b57\u7b26\u6784\u5efa\u8d26\u53f7\u3002 None token Optional[str] \u5b9e\u76d8/\u56de\u6d4b\u65f6\u7528\u7684token\u3002\u5b9e\u76d8\u6a21\u5f0f\u4e0b\u5fc5\u987b\u4f20\u5165\u3002\u5728\u56de\u6d4b\u6a21\u5f0f\u4e0b\uff0c\u5982\u679c\u672a\u4f20\u5165\uff0c\u5c06\u81ea\u52a8\u751f\u6210\u3002 None is_backtest bool \u662f\u5426\u4e3a\u56de\u6d4b\u6a21\u5f0f\u3002 True name Optional[str] \u7b56\u7565\u540d\u3002\u5982\u679c\u4e0d\u4f20\u5165\uff0c\u5219\u4f7f\u7528\u7c7b\u540d\u5b57\u5c0f\u5199 None ver Optional[str] \u7b56\u7565\u7248\u672c\u53f7\u3002\u5982\u679c\u4e0d\u4f20\u5165\uff0c\u5219\u9ed8\u8ba4\u4e3a0.1. None start Union[datetime.date, datetime.datetime] \u5982\u679c\u662f\u56de\u6d4b\u6a21\u5f0f\uff0c\u5219\u9700\u8981\u63d0\u4f9b\u56de\u6d4b\u8d77\u59cb\u65f6\u95f4 None end Union[datetime.date, datetime.datetime] \u5982\u679c\u662f\u56de\u6d4b\u6a21\u5f0f\uff0c\u5219\u9700\u8981\u63d0\u4f9b\u56de\u6d4b\u7ed3\u675f\u65f6\u95f4 None frame_type Optional[coretypes.types.FrameType] \u5982\u679c\u662f\u56de\u6d4b\u6a21\u5f0f\uff0c\u5219\u9700\u8981\u63d0\u4f9b\u56de\u6d4b\u65f6\u4f7f\u7528\u7684\u4e3b\u5468\u671f None baseline Optional[str] \u5982\u679c\u662f\u56de\u6d4b\u6a21\u5f0f\uff0c\u5219\u53ef\u4ee5\u63d0\u4f9b\u6b64\u53c2\u6570\u4f5c\u4e3a\u56de\u6d4b\u57fa\u51c6 '399300.XSHE' Source code in omicron/strategy/base.py def __init__ ( self , url : str , account : Optional [ str ] = None , token : Optional [ str ] = None , name : Optional [ str ] = None , ver : Optional [ str ] = None , is_backtest : bool = True , start : Optional [ Frame ] = None , end : Optional [ Frame ] = None , frame_type : Optional [ FrameType ] = None , baseline : Optional [ str ] = \"399300.XSHE\" , ): \"\"\"\u6784\u9020\u51fd\u6570 Args: url: \u5b9e\u76d8/\u56de\u6d4b\u670d\u52a1\u5668\u7684\u5730\u5740\u3002 start: \u56de\u6d4b\u8d77\u59cb\u65e5\u671f\u3002\u56de\u6d4b\u6a21\u5f0f\u4e0b\u5fc5\u987b\u4f20\u5165\u3002 end: \u56de\u6d4b\u7ed3\u675f\u65e5\u671f\u3002\u56de\u6d4b\u6a21\u5f0f\u4e0b\u5fc5\u987b\u4f20\u5165\u3002 account: \u5b9e\u76d8/\u56de\u6d4b\u8d26\u53f7\u3002\u5b9e\u76d8\u6a21\u5f0f\u4e0b\u5fc5\u987b\u4f20\u5165\u3002\u5728\u56de\u6d4b\u6a21\u5f0f\u4e0b\uff0c\u5982\u679c\u672a\u4f20\u5165\uff0c\u5c06\u4ee5\u7b56\u7565\u540d+\u968f\u673a\u5b57\u7b26\u6784\u5efa\u8d26\u53f7\u3002 token: \u5b9e\u76d8/\u56de\u6d4b\u65f6\u7528\u7684token\u3002\u5b9e\u76d8\u6a21\u5f0f\u4e0b\u5fc5\u987b\u4f20\u5165\u3002\u5728\u56de\u6d4b\u6a21\u5f0f\u4e0b\uff0c\u5982\u679c\u672a\u4f20\u5165\uff0c\u5c06\u81ea\u52a8\u751f\u6210\u3002 
is_backtest: \u662f\u5426\u4e3a\u56de\u6d4b\u6a21\u5f0f\u3002 name: \u7b56\u7565\u540d\u3002\u5982\u679c\u4e0d\u4f20\u5165\uff0c\u5219\u4f7f\u7528\u7c7b\u540d\u5b57\u5c0f\u5199 ver: \u7b56\u7565\u7248\u672c\u53f7\u3002\u5982\u679c\u4e0d\u4f20\u5165\uff0c\u5219\u9ed8\u8ba4\u4e3a0.1. start: \u5982\u679c\u662f\u56de\u6d4b\u6a21\u5f0f\uff0c\u5219\u9700\u8981\u63d0\u4f9b\u56de\u6d4b\u8d77\u59cb\u65f6\u95f4 end: \u5982\u679c\u662f\u56de\u6d4b\u6a21\u5f0f\uff0c\u5219\u9700\u8981\u63d0\u4f9b\u56de\u6d4b\u7ed3\u675f\u65f6\u95f4 frame_type: \u5982\u679c\u662f\u56de\u6d4b\u6a21\u5f0f\uff0c\u5219\u9700\u8981\u63d0\u4f9b\u56de\u6d4b\u65f6\u4f7f\u7528\u7684\u4e3b\u5468\u671f baseline: \u5982\u679c\u662f\u56de\u6d4b\u6a21\u5f0f\uff0c\u5219\u53ef\u4ee5\u63d0\u4f9b\u6b64\u53c2\u6570\u4f5c\u4e3a\u56de\u6d4b\u57fa\u51c6 \"\"\" self . ver = ver or \"0.1\" self . name = name or self . __class__ . __name__ . lower () + f \"_v { self . ver } \" self . token = token or uuid . uuid4 () . hex self . account = account or f \"smallcap- { self . token [ - 4 :] } \" self . url = url if is_backtest : if start is None or end is None or frame_type is None : raise ValueError ( \"start, end and frame_type must be presented.\" ) self . bs = BacktestState ( start , end , None , 0 ) self . bills = None self . metrics = None self . _frame_type = frame_type self . broker = TraderClient ( url , self . account , self . token , is_backtest = True , start = self . bs . start , end = self . bs . end , ) self . _baseline = baseline else : if account is None or token is None : raise ValueError ( \"account and token must be presented.\" ) self . broker = TraderClient ( url , self . account , self . token , is_backtest = False ) available_shares ( self , sec , dt = None ) \u00b6 \u8fd4\u56de\u7ed9\u5b9a\u80a1\u7968\u5728 dt \u65e5\u7684\u53ef\u552e\u80a1\u6570 Parameters: Name Type Description Default sec str \u8bc1\u5238\u4ee3\u7801 required dt Union[datetime.date, datetime.datetime] \u65e5\u671f\uff0c\u5728\u5b9e\u76d8\u4e2d\u65e0\u610f\u4e49\uff0c\u53ea\u80fd\u8fd4\u56de\u6700\u65b0\u6570\u636e\uff1b\u5728\u56de\u6d4b\u65f6\uff0c\u5fc5\u987b\u6307\u5b9a\u65e5\u671f\uff0c\u4e14\u8fd4\u56de\u6307\u5b9a\u65e5\u671f\u4e0b\u7684\u6301\u4ed3\u3002 None Source code in omicron/strategy/base.py def available_shares ( self , sec : str , dt : Optional [ Frame ] = None ): \"\"\"\u8fd4\u56de\u7ed9\u5b9a\u80a1\u7968\u5728`dt`\u65e5\u7684\u53ef\u552e\u80a1\u6570 Args: sec: \u8bc1\u5238\u4ee3\u7801 dt: \u65e5\u671f\uff0c\u5728\u5b9e\u76d8\u4e2d\u65e0\u610f\u4e49\uff0c\u53ea\u80fd\u8fd4\u56de\u6700\u65b0\u6570\u636e\uff1b\u5728\u56de\u6d4b\u65f6\uff0c\u5fc5\u987b\u6307\u5b9a\u65e5\u671f\uff0c\u4e14\u8fd4\u56de\u6307\u5b9a\u65e5\u671f\u4e0b\u7684\u6301\u4ed3\u3002 \"\"\" return self . broker . 
available_shares ( sec , dt ) backtest ( self , stop_on_error = True , ** kwargs ) async \u00b6 \u6267\u884c\u56de\u6d4b Parameters: Name Type Description Default stop_on_error bool \u5982\u679c\u4e3aTrue\uff0c\u5219\u53d1\u751f\u5f02\u5e38\u65f6\uff0c\u5c06\u505c\u6b62\u56de\u6d4b\u3002\u5426\u5219\u5ffd\u7565\u9519\u8bef\uff0c\u7ee7\u7eed\u6267\u884c\u3002 True Keyword arguments: Name Type Description portfolio Dict[str, BarsArray] \u4ee3\u7801\u5217\u8868\u3002\u5728\u8be5\u5217\u8868\u4e2d\u7684\u54c1\u79cd\uff0c\u5c06\u5728\u56de\u6d4b\u4e4b\u524d\u81ea\u52a8\u9884\u53d6\u884c\u60c5\u6570\u636e\uff0c\u5e76\u5728\u8c03\u7528predict\u65f6\uff0c\u4f20\u5165\u622a\u6b62\u5230\u5f53\u524dframe\u7684\uff0c\u957f\u5ea6\u4e3an\u7684\u884c\u60c5\u6570\u636e\u3002\u884c\u60c5\u5468\u671f\u7531\u6784\u9020\u65f6\u7684frame_type\u6307\u5b9a min_bars int \u56de\u6d4b\u65f6\u5fc5\u8981\u7684bars\u7684\u6700\u5c0f\u503c Source code in omicron/strategy/base.py async def backtest ( self , stop_on_error : bool = True , ** kwargs ): \"\"\"\u6267\u884c\u56de\u6d4b Args: stop_on_error: \u5982\u679c\u4e3aTrue\uff0c\u5219\u53d1\u751f\u5f02\u5e38\u65f6\uff0c\u5c06\u505c\u6b62\u56de\u6d4b\u3002\u5426\u5219\u5ffd\u7565\u9519\u8bef\uff0c\u7ee7\u7eed\u6267\u884c\u3002 Keyword Args: portfolio Dict[str, BarsArray]: \u4ee3\u7801\u5217\u8868\u3002\u5728\u8be5\u5217\u8868\u4e2d\u7684\u54c1\u79cd\uff0c\u5c06\u5728\u56de\u6d4b\u4e4b\u524d\u81ea\u52a8\u9884\u53d6\u884c\u60c5\u6570\u636e\uff0c\u5e76\u5728\u8c03\u7528predict\u65f6\uff0c\u4f20\u5165\u622a\u6b62\u5230\u5f53\u524dframe\u7684\uff0c\u957f\u5ea6\u4e3an\u7684\u884c\u60c5\u6570\u636e\u3002\u884c\u60c5\u5468\u671f\u7531\u6784\u9020\u65f6\u7684frame_type\u6307\u5b9a min_bars int: \u56de\u6d4b\u65f6\u5fc5\u8981\u7684bars\u7684\u6700\u5c0f\u503c \"\"\" portfolio : List [ str ] = kwargs . get ( \"portfolio\" ) # type: ignore n = kwargs . get ( \"min_bars\" , 0 ) await self . _cache_bars_for_backtest ( portfolio , n ) self . bs . cursor = n converter = ( tf . int2date if self . _frame_type in tf . day_level_frames else tf . int2time ) for i , frame in enumerate ( tf . get_frames ( self . bs . start , self . bs . end , self . _frame_type ) # type: ignore ): barss = self . _next () logger . debug ( \" %s th iteration\" , i , date = converter ( frame )) try : await self . predict ( converter ( frame ), self . _frame_type , i , barss = barss , ** kwargs # type: ignore ) except Exception as e : logger . exception ( e ) if stop_on_error : raise e self . broker . stop_backtest () self . bills = self . broker . bills () self . metrics = self . broker . metrics ( baseline = self . 
_baseline ) buy ( self , sec , price = None , vol = None , money = None , order_time = None ) async \u00b6 \u4e70\u5165\u80a1\u7968 Parameters: Name Type Description Default sec str \u8bc1\u5238\u4ee3\u7801 required price Optional[float] \u59d4\u4e70\u4ef7\u3002\u5982\u679c\u4e3aNone\uff0c\u5219\u81ea\u52a8\u8f6c\u5e02\u4ef7\u4e70\u5165\u3002 None vol Optional[int] \u59d4\u4e70\u80a1\u6570\u3002\u8bf7\u81ea\u884c\u4fdd\u8bc1\u4e3a100\u7684\u6574\u6570\u3002\u5982\u679c\u4e3aNone, \u5219money\u5fc5\u987b\u4f20\u5165\u3002 None money Optional[float] \u59d4\u4e70\u91d1\u989d\u3002\u5982\u679c\u540c\u65f6\u4f20\u5165\u4e86vol\uff0c\u5219\u6b64\u53c2\u6570\u81ea\u52a8\u5ffd\u7565 None order_time Optional[datetime.datetime] \u4ec5\u5728\u56de\u6d4b\u6a21\u5f0f\u4e0b\u9700\u8981\u63d0\u4f9b\u3002\u5b9e\u76d8\u6a21\u5f0f\u4e0b\uff0c\u6b64\u53c2\u6570\u81ea\u52a8\u88ab\u5ffd\u7565 None Returns: Type Description Dict \u89c1traderclient\u4e2d\u7684 buy \u65b9\u6cd5\u3002 Source code in omicron/strategy/base.py async def buy ( self , sec : str , price : Optional [ float ] = None , vol : Optional [ int ] = None , money : Optional [ float ] = None , order_time : Optional [ datetime . datetime ] = None , ) -> Dict : \"\"\"\u4e70\u5165\u80a1\u7968 Args: sec: \u8bc1\u5238\u4ee3\u7801 price: \u59d4\u4e70\u4ef7\u3002\u5982\u679c\u4e3aNone\uff0c\u5219\u81ea\u52a8\u8f6c\u5e02\u4ef7\u4e70\u5165\u3002 vol: \u59d4\u4e70\u80a1\u6570\u3002\u8bf7\u81ea\u884c\u4fdd\u8bc1\u4e3a100\u7684\u6574\u6570\u3002\u5982\u679c\u4e3aNone, \u5219money\u5fc5\u987b\u4f20\u5165\u3002 money: \u59d4\u4e70\u91d1\u989d\u3002\u5982\u679c\u540c\u65f6\u4f20\u5165\u4e86vol\uff0c\u5219\u6b64\u53c2\u6570\u81ea\u52a8\u5ffd\u7565 order_time: \u4ec5\u5728\u56de\u6d4b\u6a21\u5f0f\u4e0b\u9700\u8981\u63d0\u4f9b\u3002\u5b9e\u76d8\u6a21\u5f0f\u4e0b\uff0c\u6b64\u53c2\u6570\u81ea\u52a8\u88ab\u5ffd\u7565 Returns: \u89c1traderclient\u4e2d\u7684`buy`\u65b9\u6cd5\u3002 \"\"\" logger . info ( \"buy order: %s , %s , %s , %s \" , sec , f \" { price : .2f } \" if price is not None else None , f \" { vol : .0f } \" if vol is not None else None , f \" { money : .0f } \" if money is not None else None , date = order_time , ) if vol is None : if money is None : raise ValueError ( \"parameter `mnoey` must be presented!\" ) return await self . broker . buy_by_money ( sec , money , price , order_time = order_time ) elif price is None : return self . broker . market_buy ( sec , vol , order_time = order_time ) else : return self . broker . buy ( sec , price , vol , order_time = order_time ) peek ( self , code , n ) async \u00b6 \u5141\u8bb8\u7b56\u7565\u5077\u770b\u672a\u6765\u6570\u636e \u53ef\u7528\u4ee5\u56e0\u5b50\u68c0\u9a8c\u573a\u666f\u3002\u8981\u6c42\u6570\u636e\u672c\u8eab\u5df2\u7f13\u5b58\u3002\u5426\u5219\u8bf7\u7528Stock.get_bars\u7b49\u65b9\u6cd5\u83b7\u53d6\u3002 Source code in omicron/strategy/base.py async def peek ( self , code : str , n : int ): \"\"\"\u5141\u8bb8\u7b56\u7565\u5077\u770b\u672a\u6765\u6570\u636e \u53ef\u7528\u4ee5\u56e0\u5b50\u68c0\u9a8c\u573a\u666f\u3002\u8981\u6c42\u6570\u636e\u672c\u8eab\u5df2\u7f13\u5b58\u3002\u5426\u5219\u8bf7\u7528Stock.get_bars\u7b49\u65b9\u6cd5\u83b7\u53d6\u3002 \"\"\" if self . bs is None or self . bs . barss is None : raise ValueError ( \"data is not cached\" ) if code in self . bs . barss : if self . bs . cursor + n + 1 < len ( self . bs . barss [ code ]): return Stock . qfq ( self . bs . barss [ code ][ self . bs . cursor : self . bs . 
cursor + n ] ) else : raise ValueError ( \"data is not cached\" ) plot_metrics ( self , indicator = None ) async \u00b6 \u7b56\u7565\u56de\u6d4b\u62a5\u544a Parameters: Name Type Description Default indicator Union[pandas.core.frame.DataFrame, List[Tuple]] \u56de\u6d4b\u65f6\u4f7f\u7528\u7684\u6307\u6807\u3002\u5982\u679c\u5b58\u5728\uff0c\u5c06\u53e0\u52a0\u5230\u7b56\u7565\u56de\u6d4b\u56fe\u4e0a\u3002\u5b83\u5e94\u8be5\u662f\u4e00\u4e2a\u4ee5\u65e5\u671f\u4e3a\u7d22\u5f15\uff0c\u6307\u6807\u5217\u540d\u4e3a\"value\"\u7684DataFrame None Source code in omicron/strategy/base.py async def plot_metrics ( self , indicator : Union [ pd . DataFrame , List [ Tuple ], None ] = None ): \"\"\"\u7b56\u7565\u56de\u6d4b\u62a5\u544a Args: indicator: \u56de\u6d4b\u65f6\u4f7f\u7528\u7684\u6307\u6807\u3002\u5982\u679c\u5b58\u5728\uff0c\u5c06\u53e0\u52a0\u5230\u7b56\u7565\u56de\u6d4b\u56fe\u4e0a\u3002\u5b83\u5e94\u8be5\u662f\u4e00\u4e2a\u4ee5\u65e5\u671f\u4e3a\u7d22\u5f15\uff0c\u6307\u6807\u5217\u540d\u4e3a\"value\"\u7684DataFrame \"\"\" if self . bills is None or self . metrics is None : raise ValueError ( \"Please run `start_backtest` first.\" ) if isinstance ( indicator , list ): assert len ( indicator [ 0 ]) == 2 indicator = pd . DataFrame ( indicator , columns = [ \"date\" , \"value\" ]) indicator . set_index ( \"date\" , inplace = True ) if self . _baseline is not None : mg = MetricsGraph ( self . bills , self . metrics , baseline_code = self . _baseline , indicator = indicator , ) else : mg = MetricsGraph ( self . bills , self . metrics , indicator = indicator ) await mg . plot () positions ( self , dt = None ) \u00b6 \u8fd4\u56de\u5f53\u524d\u6301\u4ed3 Source code in omicron/strategy/base.py def positions ( self , dt : Optional [ datetime . date ] = None ): \"\"\"\u8fd4\u56de\u5f53\u524d\u6301\u4ed3\"\"\" return self . broker . 
positions ( dt ) predict ( self , frame , frame_type , i , barss , ** kwargs ) async \u00b6 \u7b56\u7565\u8bc4\u4f30\u51fd\u6570\u3002\u5728\u6b64\u51fd\u6570\u4e2d\u5b9e\u73b0\u4ea4\u6613\u4fe1\u53f7\u68c0\u6d4b\u548c\u5904\u7406\u3002 Parameters: Name Type Description Default frame Union[datetime.date, datetime.datetime] \u5f53\u524d\u65f6\u95f4\u5e27 required frame_type FrameType \u5904\u7406\u7684\u6570\u636e\u4e3b\u5468\u671f required i int \u5f53\u524d\u65f6\u95f4\u79bb\u56de\u6d4b\u8d77\u59cb\u7684\u5355\u4f4d\u6570 required barss Dict[str, numpy.ndarray[Any, numpy.dtype[dtype([('frame', ' Union [ List , Dict ]: \"\"\"\u5356\u51fa\u80a1\u7968 Args: sec: \u8bc1\u5238\u4ee3\u7801 price: \u59d4\u5356\u4ef7\uff0c\u5982\u679c\u672a\u63d0\u4f9b\uff0c\u5219\u8f6c\u4e3a\u5e02\u4ef7\u5355 vol: \u59d4\u5356\u80a1\u6570\u3002\u5982\u679c\u4e3aNone\uff0c\u5219percent\u5fc5\u987b\u4f20\u5165 percent: \u5356\u51fa\u4e00\u5b9a\u6bd4\u4f8b\u7684\u6301\u4ed3\uff0c\u53d6\u503c\u4ecb\u4e8e0\u4e0e1\u4e4b\u95f4\u3002\u5982\u679c\u4e0evol\u540c\u65f6\u63d0\u4f9b\uff0c\u6b64\u53c2\u6570\u5c06\u88ab\u5ffd\u7565\u3002\u8bf7\u81ea\u884c\u4fdd\u8bc1\u6309\u6bd4\u4f8b\u6362\u7b97\u540e\u7684\u5356\u51fa\u6570\u636e\u662f\u7b26\u5408\u8981\u6c42\u7684\uff08\u6bd4\u5982\u4e0d\u4e3a100\u7684\u500d\u6570\uff0c\u4f46\u6709\u4e9b\u60c5\u51b5\u4e0b\u8fd9\u662f\u5141\u8bb8\u7684\uff0c\u6240\u4ee5\u7a0b\u5e8f\u8fd9\u91cc\u65e0\u6cd5\u5e2e\u4f60\u5224\u65ad\uff09 order_time: \u4ec5\u5728\u56de\u6d4b\u6a21\u5f0f\u4e0b\u9700\u8981\u63d0\u4f9b\u3002\u5b9e\u76d8\u6a21\u5f0f\u4e0b\uff0c\u6b64\u53c2\u6570\u81ea\u52a8\u88ab\u5ffd\u7565 Returns: Union[List, Dict]: \u6210\u4ea4\u8fd4\u56de\uff0c\u8be6\u89c1traderclient\u4e2d\u7684`buy`\u65b9\u6cd5\uff0ctrade server\u53ea\u8fd4\u56de\u4e00\u4e2a\u59d4\u6258\u5355\u4fe1\u606f \"\"\" logger . info ( \"sell order: %s , %s , %s , %s \" , sec , f \" { price : .2f } \" if price is not None else None , f \" { vol : .0f } \" if vol is not None else None , f \" { percent : .2% } \" if percent is not None else None , date = order_time , ) if vol is None and percent is None : raise ValueError ( \"either vol or percent must be presented\" ) if vol is None : if price is None : price = await self . broker . _get_market_sell_price ( sec , order_time = order_time ) # there's no market_sell_percent API in traderclient return self . broker . sell_percent ( sec , price , percent , order_time = order_time ) # type: ignore else : if price is None : return self . broker . market_sell ( sec , vol , order_time = order_time ) else : return self . broker . sell ( sec , price , vol , order_time = order_time ) sma \u00b6 SMAStrategy ( BaseStrategy ) \u00b6 Source code in omicron/strategy/sma.py class SMAStrategy ( BaseStrategy ): def __init__ ( self , sec : str , n_short : int = 5 , n_long : int = 10 , * args , ** kwargs ): self . _sec = sec self . _n_short = n_short self . _n_long = n_long self . indicators = [] super () . __init__ ( * args , ** kwargs ) async def predict ( self , frame : Frame , frame_type : FrameType , i : int , barss , ** kwargs ): if barss is None : raise ValueError ( \"please specify `portfolio` and `min_bars`\" ) bars : Union [ BarsArray , None ] = barss . get ( self . _sec ) if bars is None : raise ValueError ( f \" { self . _sec } not found in `portfolio`\" ) ma_short = np . mean ( bars [ \"close\" ][ - self . _n_short :]) ma_long = np . mean ( bars [ \"close\" ][ - self . _n_long :]) if ma_short > ma_long : self . indicators . append (( frame , 1 )) if self . 
cash >= 100 * bars [ \"close\" ][ - 1 ]: await self . buy ( self . _sec , money = self . cash , order_time = tf . combine_time ( frame , 14 , 55 ), ) elif ma_short < ma_long : self . indicators . append (( frame , - 1 )) if self . available_shares ( self . _sec , frame ) > 0 : await self . sell ( self . _sec , percent = 1.0 , order_time = tf . combine_time ( frame , 14 , 55 ) ) predict ( self , frame , frame_type , i , barss , ** kwargs ) async \u00b6 \u7b56\u7565\u8bc4\u4f30\u51fd\u6570\u3002\u5728\u6b64\u51fd\u6570\u4e2d\u5b9e\u73b0\u4ea4\u6613\u4fe1\u53f7\u68c0\u6d4b\u548c\u5904\u7406\u3002 Parameters: Name Type Description Default frame Union[datetime.date, datetime.datetime] \u5f53\u524d\u65f6\u95f4\u5e27 required frame_type FrameType \u5904\u7406\u7684\u6570\u636e\u4e3b\u5468\u671f required i int \u5f53\u524d\u65f6\u95f4\u79bb\u56de\u6d4b\u8d77\u59cb\u7684\u5355\u4f4d\u6570 required barss \u5982\u679c\u8c03\u7528 backtest \u65f6\u4f20\u5165\u4e86 portfolio \u53ca min_bars \u53c2\u6570\uff0c\u5219 backtest \u5c06\u4f1a\u5728\u56de\u6d4b\u4e4b\u524d\uff0c\u9884\u53d6\u4ece[start - min_bars * frame_type, end]\u95f4\u7684portfolio\u884c\u60c5\u6570\u636e\uff0c\u5e76\u5728\u6bcf\u6b21\u8c03\u7528 predict \u65b9\u6cd5\u65f6\uff0c\u901a\u8fc7 barss \u53c2\u6570\uff0c\u5c06[start - min_bars * frame_type, start + i * frame_type]\u95f4\u7684\u6570\u636e\u4f20\u7ed9 predict \u65b9\u6cd5\u3002\u4f20\u5165\u7684\u6570\u636e\u5df2\u8fdb\u884c\u524d\u590d\u6743\u3002 required Keyword Args: \u5728 backtest \u65b9\u6cd5\u4e2d\u7684\u4f20\u5165\u7684kwargs\u53c2\u6570\u5c06\u88ab\u900f\u4f20\u5230\u6b64\u65b9\u6cd5\u4e2d\u3002 Source code in omicron/strategy/sma.py async def predict ( self , frame : Frame , frame_type : FrameType , i : int , barss , ** kwargs ): if barss is None : raise ValueError ( \"please specify `portfolio` and `min_bars`\" ) bars : Union [ BarsArray , None ] = barss . get ( self . _sec ) if bars is None : raise ValueError ( f \" { self . _sec } not found in `portfolio`\" ) ma_short = np . mean ( bars [ \"close\" ][ - self . _n_short :]) ma_long = np . mean ( bars [ \"close\" ][ - self . _n_long :]) if ma_short > ma_long : self . indicators . append (( frame , 1 )) if self . cash >= 100 * bars [ \"close\" ][ - 1 ]: await self . buy ( self . _sec , money = self . cash , order_time = tf . combine_time ( frame , 14 , 55 ), ) elif ma_short < ma_long : self . indicators . append (( frame , - 1 )) if self . available_shares ( self . _sec , frame ) > 0 : await self . sell ( self . _sec , percent = 1.0 , order_time = tf . 
combine_time ( frame , 14 , 55 ) )","title":"\u7b56\u7565\u6846\u67b6"},{"location":"api/strategy/#omicron.strategy.base","text":"","title":"base"},{"location":"api/strategy/#omicron.strategy.base.BacktestState","text":"BacktestState(start: Union[datetime.date, datetime.datetime], end: Union[datetime.date, datetime.datetime], barss: Union[NoneType, Dict[str, numpy.ndarray[Any, numpy.dtype[dtype([('frame', ' Dict : \"\"\"\u4e70\u5165\u80a1\u7968 Args: sec: \u8bc1\u5238\u4ee3\u7801 price: \u59d4\u4e70\u4ef7\u3002\u5982\u679c\u4e3aNone\uff0c\u5219\u81ea\u52a8\u8f6c\u5e02\u4ef7\u4e70\u5165\u3002 vol: \u59d4\u4e70\u80a1\u6570\u3002\u8bf7\u81ea\u884c\u4fdd\u8bc1\u4e3a100\u7684\u6574\u6570\u3002\u5982\u679c\u4e3aNone, \u5219money\u5fc5\u987b\u4f20\u5165\u3002 money: \u59d4\u4e70\u91d1\u989d\u3002\u5982\u679c\u540c\u65f6\u4f20\u5165\u4e86vol\uff0c\u5219\u6b64\u53c2\u6570\u81ea\u52a8\u5ffd\u7565 order_time: \u4ec5\u5728\u56de\u6d4b\u6a21\u5f0f\u4e0b\u9700\u8981\u63d0\u4f9b\u3002\u5b9e\u76d8\u6a21\u5f0f\u4e0b\uff0c\u6b64\u53c2\u6570\u81ea\u52a8\u88ab\u5ffd\u7565 Returns: \u89c1traderclient\u4e2d\u7684`buy`\u65b9\u6cd5\u3002 \"\"\" logger . info ( \"buy order: %s , %s , %s , %s \" , sec , f \" { price : .2f } \" if price is not None else None , f \" { vol : .0f } \" if vol is not None else None , f \" { money : .0f } \" if money is not None else None , date = order_time , ) if vol is None : if money is None : raise ValueError ( \"parameter `mnoey` must be presented!\" ) return await self . broker . buy_by_money ( sec , money , price , order_time = order_time ) elif price is None : return self . broker . market_buy ( sec , vol , order_time = order_time ) else : return self . broker . buy ( sec , price , vol , order_time = order_time ) async def sell ( self , sec : str , price : Optional [ float ] = None , vol : Optional [ float ] = None , percent : Optional [ float ] = None , order_time : Optional [ datetime . datetime ] = None , ) -> Union [ List , Dict ]: \"\"\"\u5356\u51fa\u80a1\u7968 Args: sec: \u8bc1\u5238\u4ee3\u7801 price: \u59d4\u5356\u4ef7\uff0c\u5982\u679c\u672a\u63d0\u4f9b\uff0c\u5219\u8f6c\u4e3a\u5e02\u4ef7\u5355 vol: \u59d4\u5356\u80a1\u6570\u3002\u5982\u679c\u4e3aNone\uff0c\u5219percent\u5fc5\u987b\u4f20\u5165 percent: \u5356\u51fa\u4e00\u5b9a\u6bd4\u4f8b\u7684\u6301\u4ed3\uff0c\u53d6\u503c\u4ecb\u4e8e0\u4e0e1\u4e4b\u95f4\u3002\u5982\u679c\u4e0evol\u540c\u65f6\u63d0\u4f9b\uff0c\u6b64\u53c2\u6570\u5c06\u88ab\u5ffd\u7565\u3002\u8bf7\u81ea\u884c\u4fdd\u8bc1\u6309\u6bd4\u4f8b\u6362\u7b97\u540e\u7684\u5356\u51fa\u6570\u636e\u662f\u7b26\u5408\u8981\u6c42\u7684\uff08\u6bd4\u5982\u4e0d\u4e3a100\u7684\u500d\u6570\uff0c\u4f46\u6709\u4e9b\u60c5\u51b5\u4e0b\u8fd9\u662f\u5141\u8bb8\u7684\uff0c\u6240\u4ee5\u7a0b\u5e8f\u8fd9\u91cc\u65e0\u6cd5\u5e2e\u4f60\u5224\u65ad\uff09 order_time: \u4ec5\u5728\u56de\u6d4b\u6a21\u5f0f\u4e0b\u9700\u8981\u63d0\u4f9b\u3002\u5b9e\u76d8\u6a21\u5f0f\u4e0b\uff0c\u6b64\u53c2\u6570\u81ea\u52a8\u88ab\u5ffd\u7565 Returns: Union[List, Dict]: \u6210\u4ea4\u8fd4\u56de\uff0c\u8be6\u89c1traderclient\u4e2d\u7684`buy`\u65b9\u6cd5\uff0ctrade server\u53ea\u8fd4\u56de\u4e00\u4e2a\u59d4\u6258\u5355\u4fe1\u606f \"\"\" logger . 
info ( \"sell order: %s , %s , %s , %s \" , sec , f \" { price : .2f } \" if price is not None else None , f \" { vol : .0f } \" if vol is not None else None , f \" { percent : .2% } \" if percent is not None else None , date = order_time , ) if vol is None and percent is None : raise ValueError ( \"either vol or percent must be presented\" ) if vol is None : if price is None : price = await self . broker . _get_market_sell_price ( sec , order_time = order_time ) # there's no market_sell_percent API in traderclient return self . broker . sell_percent ( sec , price , percent , order_time = order_time ) # type: ignore else : if price is None : return self . broker . market_sell ( sec , vol , order_time = order_time ) else : return self . broker . sell ( sec , price , vol , order_time = order_time ) async def filter_paused_stock ( self , buylist : List [ str ], dt : datetime . date ): secs = await Security . select ( dt ) . eval () in_trading = jq . get_price ( secs , fields = [ \"paused\" ], start_date = dt , end_date = dt , skip_paused = True )[ \"code\" ] . to_numpy () return np . intersect1d ( buylist , in_trading ) async def predict ( self , frame : Frame , frame_type : FrameType , i : int , barss : Dict [ str , BarsArray ], ** kwargs , ): \"\"\"\u7b56\u7565\u8bc4\u4f30\u51fd\u6570\u3002\u5728\u6b64\u51fd\u6570\u4e2d\u5b9e\u73b0\u4ea4\u6613\u4fe1\u53f7\u68c0\u6d4b\u548c\u5904\u7406\u3002 Args: frame: \u5f53\u524d\u65f6\u95f4\u5e27 frame_type: \u5904\u7406\u7684\u6570\u636e\u4e3b\u5468\u671f i: \u5f53\u524d\u65f6\u95f4\u79bb\u56de\u6d4b\u8d77\u59cb\u7684\u5355\u4f4d\u6570 barss: \u5982\u679c\u8c03\u7528`backtest`\u65f6\u4f20\u5165\u4e86`portfolio`\u53ca`min_bars`\u53c2\u6570\uff0c\u5219`backtest`\u5c06\u4f1a\u5728\u56de\u6d4b\u4e4b\u524d\uff0c\u9884\u53d6\u4ece[start - min_bars * frame_type, end]\u95f4\u7684portfolio\u884c\u60c5\u6570\u636e\uff0c\u5e76\u5728\u6bcf\u6b21\u8c03\u7528`predict`\u65b9\u6cd5\u65f6\uff0c\u901a\u8fc7`barss`\u53c2\u6570\uff0c\u5c06[start - min_bars * frame_type, start + i * frame_type]\u95f4\u7684\u6570\u636e\u4f20\u7ed9`predict`\u65b9\u6cd5\u3002\u4f20\u5165\u7684\u6570\u636e\u5df2\u8fdb\u884c\u524d\u590d\u6743\u3002 Keyword Args: \u5728`backtest`\u65b9\u6cd5\u4e2d\u7684\u4f20\u5165\u7684kwargs\u53c2\u6570\u5c06\u88ab\u900f\u4f20\u5230\u6b64\u65b9\u6cd5\u4e2d\u3002 \"\"\" raise NotImplementedError async def plot_metrics ( self , indicator : Union [ pd . DataFrame , List [ Tuple ], None ] = None ): \"\"\"\u7b56\u7565\u56de\u6d4b\u62a5\u544a Args: indicator: \u56de\u6d4b\u65f6\u4f7f\u7528\u7684\u6307\u6807\u3002\u5982\u679c\u5b58\u5728\uff0c\u5c06\u53e0\u52a0\u5230\u7b56\u7565\u56de\u6d4b\u56fe\u4e0a\u3002\u5b83\u5e94\u8be5\u662f\u4e00\u4e2a\u4ee5\u65e5\u671f\u4e3a\u7d22\u5f15\uff0c\u6307\u6807\u5217\u540d\u4e3a\"value\"\u7684DataFrame \"\"\" if self . bills is None or self . metrics is None : raise ValueError ( \"Please run `start_backtest` first.\" ) if isinstance ( indicator , list ): assert len ( indicator [ 0 ]) == 2 indicator = pd . DataFrame ( indicator , columns = [ \"date\" , \"value\" ]) indicator . set_index ( \"date\" , inplace = True ) if self . _baseline is not None : mg = MetricsGraph ( self . bills , self . metrics , baseline_code = self . _baseline , indicator = indicator , ) else : mg = MetricsGraph ( self . bills , self . metrics , indicator = indicator ) await mg . 
plot ()","title":"BaseStrategy"},{"location":"api/strategy/#omicron.strategy.base.BaseStrategy.cash","text":"\u8fd4\u56de\u5f53\u524d\u53ef\u7528\u73b0\u91d1","title":"cash"},{"location":"api/strategy/#omicron.strategy.base.BaseStrategy.__init__","text":"\u6784\u9020\u51fd\u6570 Parameters: Name Type Description Default url str \u5b9e\u76d8/\u56de\u6d4b\u670d\u52a1\u5668\u7684\u5730\u5740\u3002 required start Union[datetime.date, datetime.datetime] \u56de\u6d4b\u8d77\u59cb\u65e5\u671f\u3002\u56de\u6d4b\u6a21\u5f0f\u4e0b\u5fc5\u987b\u4f20\u5165\u3002 None end Union[datetime.date, datetime.datetime] \u56de\u6d4b\u7ed3\u675f\u65e5\u671f\u3002\u56de\u6d4b\u6a21\u5f0f\u4e0b\u5fc5\u987b\u4f20\u5165\u3002 None account Optional[str] \u5b9e\u76d8/\u56de\u6d4b\u8d26\u53f7\u3002\u5b9e\u76d8\u6a21\u5f0f\u4e0b\u5fc5\u987b\u4f20\u5165\u3002\u5728\u56de\u6d4b\u6a21\u5f0f\u4e0b\uff0c\u5982\u679c\u672a\u4f20\u5165\uff0c\u5c06\u4ee5\u7b56\u7565\u540d+\u968f\u673a\u5b57\u7b26\u6784\u5efa\u8d26\u53f7\u3002 None token Optional[str] \u5b9e\u76d8/\u56de\u6d4b\u65f6\u7528\u7684token\u3002\u5b9e\u76d8\u6a21\u5f0f\u4e0b\u5fc5\u987b\u4f20\u5165\u3002\u5728\u56de\u6d4b\u6a21\u5f0f\u4e0b\uff0c\u5982\u679c\u672a\u4f20\u5165\uff0c\u5c06\u81ea\u52a8\u751f\u6210\u3002 None is_backtest bool \u662f\u5426\u4e3a\u56de\u6d4b\u6a21\u5f0f\u3002 True name Optional[str] \u7b56\u7565\u540d\u3002\u5982\u679c\u4e0d\u4f20\u5165\uff0c\u5219\u4f7f\u7528\u7c7b\u540d\u5b57\u5c0f\u5199 None ver Optional[str] \u7b56\u7565\u7248\u672c\u53f7\u3002\u5982\u679c\u4e0d\u4f20\u5165\uff0c\u5219\u9ed8\u8ba4\u4e3a0.1. None start Union[datetime.date, datetime.datetime] \u5982\u679c\u662f\u56de\u6d4b\u6a21\u5f0f\uff0c\u5219\u9700\u8981\u63d0\u4f9b\u56de\u6d4b\u8d77\u59cb\u65f6\u95f4 None end Union[datetime.date, datetime.datetime] \u5982\u679c\u662f\u56de\u6d4b\u6a21\u5f0f\uff0c\u5219\u9700\u8981\u63d0\u4f9b\u56de\u6d4b\u7ed3\u675f\u65f6\u95f4 None frame_type Optional[coretypes.types.FrameType] \u5982\u679c\u662f\u56de\u6d4b\u6a21\u5f0f\uff0c\u5219\u9700\u8981\u63d0\u4f9b\u56de\u6d4b\u65f6\u4f7f\u7528\u7684\u4e3b\u5468\u671f None baseline Optional[str] \u5982\u679c\u662f\u56de\u6d4b\u6a21\u5f0f\uff0c\u5219\u53ef\u4ee5\u63d0\u4f9b\u6b64\u53c2\u6570\u4f5c\u4e3a\u56de\u6d4b\u57fa\u51c6 '399300.XSHE' Source code in omicron/strategy/base.py def __init__ ( self , url : str , account : Optional [ str ] = None , token : Optional [ str ] = None , name : Optional [ str ] = None , ver : Optional [ str ] = None , is_backtest : bool = True , start : Optional [ Frame ] = None , end : Optional [ Frame ] = None , frame_type : Optional [ FrameType ] = None , baseline : Optional [ str ] = \"399300.XSHE\" , ): \"\"\"\u6784\u9020\u51fd\u6570 Args: url: \u5b9e\u76d8/\u56de\u6d4b\u670d\u52a1\u5668\u7684\u5730\u5740\u3002 start: \u56de\u6d4b\u8d77\u59cb\u65e5\u671f\u3002\u56de\u6d4b\u6a21\u5f0f\u4e0b\u5fc5\u987b\u4f20\u5165\u3002 end: \u56de\u6d4b\u7ed3\u675f\u65e5\u671f\u3002\u56de\u6d4b\u6a21\u5f0f\u4e0b\u5fc5\u987b\u4f20\u5165\u3002 account: \u5b9e\u76d8/\u56de\u6d4b\u8d26\u53f7\u3002\u5b9e\u76d8\u6a21\u5f0f\u4e0b\u5fc5\u987b\u4f20\u5165\u3002\u5728\u56de\u6d4b\u6a21\u5f0f\u4e0b\uff0c\u5982\u679c\u672a\u4f20\u5165\uff0c\u5c06\u4ee5\u7b56\u7565\u540d+\u968f\u673a\u5b57\u7b26\u6784\u5efa\u8d26\u53f7\u3002 token: \u5b9e\u76d8/\u56de\u6d4b\u65f6\u7528\u7684token\u3002\u5b9e\u76d8\u6a21\u5f0f\u4e0b\u5fc5\u987b\u4f20\u5165\u3002\u5728\u56de\u6d4b\u6a21\u5f0f\u4e0b\uff0c\u5982\u679c\u672a\u4f20\u5165\uff0c\u5c06\u81ea\u52a8\u751f\u6210\u3002 is_backtest: 
\u662f\u5426\u4e3a\u56de\u6d4b\u6a21\u5f0f\u3002 name: \u7b56\u7565\u540d\u3002\u5982\u679c\u4e0d\u4f20\u5165\uff0c\u5219\u4f7f\u7528\u7c7b\u540d\u5b57\u5c0f\u5199 ver: \u7b56\u7565\u7248\u672c\u53f7\u3002\u5982\u679c\u4e0d\u4f20\u5165\uff0c\u5219\u9ed8\u8ba4\u4e3a0.1. start: \u5982\u679c\u662f\u56de\u6d4b\u6a21\u5f0f\uff0c\u5219\u9700\u8981\u63d0\u4f9b\u56de\u6d4b\u8d77\u59cb\u65f6\u95f4 end: \u5982\u679c\u662f\u56de\u6d4b\u6a21\u5f0f\uff0c\u5219\u9700\u8981\u63d0\u4f9b\u56de\u6d4b\u7ed3\u675f\u65f6\u95f4 frame_type: \u5982\u679c\u662f\u56de\u6d4b\u6a21\u5f0f\uff0c\u5219\u9700\u8981\u63d0\u4f9b\u56de\u6d4b\u65f6\u4f7f\u7528\u7684\u4e3b\u5468\u671f baseline: \u5982\u679c\u662f\u56de\u6d4b\u6a21\u5f0f\uff0c\u5219\u53ef\u4ee5\u63d0\u4f9b\u6b64\u53c2\u6570\u4f5c\u4e3a\u56de\u6d4b\u57fa\u51c6 \"\"\" self . ver = ver or \"0.1\" self . name = name or self . __class__ . __name__ . lower () + f \"_v { self . ver } \" self . token = token or uuid . uuid4 () . hex self . account = account or f \"smallcap- { self . token [ - 4 :] } \" self . url = url if is_backtest : if start is None or end is None or frame_type is None : raise ValueError ( \"start, end and frame_type must be presented.\" ) self . bs = BacktestState ( start , end , None , 0 ) self . bills = None self . metrics = None self . _frame_type = frame_type self . broker = TraderClient ( url , self . account , self . token , is_backtest = True , start = self . bs . start , end = self . bs . end , ) self . _baseline = baseline else : if account is None or token is None : raise ValueError ( \"account and token must be presented.\" ) self . broker = TraderClient ( url , self . account , self . token , is_backtest = False )","title":"__init__()"},{"location":"api/strategy/#omicron.strategy.base.BaseStrategy.available_shares","text":"\u8fd4\u56de\u7ed9\u5b9a\u80a1\u7968\u5728 dt \u65e5\u7684\u53ef\u552e\u80a1\u6570 Parameters: Name Type Description Default sec str \u8bc1\u5238\u4ee3\u7801 required dt Union[datetime.date, datetime.datetime] \u65e5\u671f\uff0c\u5728\u5b9e\u76d8\u4e2d\u65e0\u610f\u4e49\uff0c\u53ea\u80fd\u8fd4\u56de\u6700\u65b0\u6570\u636e\uff1b\u5728\u56de\u6d4b\u65f6\uff0c\u5fc5\u987b\u6307\u5b9a\u65e5\u671f\uff0c\u4e14\u8fd4\u56de\u6307\u5b9a\u65e5\u671f\u4e0b\u7684\u6301\u4ed3\u3002 None Source code in omicron/strategy/base.py def available_shares ( self , sec : str , dt : Optional [ Frame ] = None ): \"\"\"\u8fd4\u56de\u7ed9\u5b9a\u80a1\u7968\u5728`dt`\u65e5\u7684\u53ef\u552e\u80a1\u6570 Args: sec: \u8bc1\u5238\u4ee3\u7801 dt: \u65e5\u671f\uff0c\u5728\u5b9e\u76d8\u4e2d\u65e0\u610f\u4e49\uff0c\u53ea\u80fd\u8fd4\u56de\u6700\u65b0\u6570\u636e\uff1b\u5728\u56de\u6d4b\u65f6\uff0c\u5fc5\u987b\u6307\u5b9a\u65e5\u671f\uff0c\u4e14\u8fd4\u56de\u6307\u5b9a\u65e5\u671f\u4e0b\u7684\u6301\u4ed3\u3002 \"\"\" return self . broker . 
available_shares ( sec , dt )","title":"available_shares()"},{"location":"api/strategy/#omicron.strategy.base.BaseStrategy.backtest","text":"\u6267\u884c\u56de\u6d4b Parameters: Name Type Description Default stop_on_error bool \u5982\u679c\u4e3aTrue\uff0c\u5219\u53d1\u751f\u5f02\u5e38\u65f6\uff0c\u5c06\u505c\u6b62\u56de\u6d4b\u3002\u5426\u5219\u5ffd\u7565\u9519\u8bef\uff0c\u7ee7\u7eed\u6267\u884c\u3002 True Keyword arguments: Name Type Description portfolio Dict[str, BarsArray] \u4ee3\u7801\u5217\u8868\u3002\u5728\u8be5\u5217\u8868\u4e2d\u7684\u54c1\u79cd\uff0c\u5c06\u5728\u56de\u6d4b\u4e4b\u524d\u81ea\u52a8\u9884\u53d6\u884c\u60c5\u6570\u636e\uff0c\u5e76\u5728\u8c03\u7528predict\u65f6\uff0c\u4f20\u5165\u622a\u6b62\u5230\u5f53\u524dframe\u7684\uff0c\u957f\u5ea6\u4e3an\u7684\u884c\u60c5\u6570\u636e\u3002\u884c\u60c5\u5468\u671f\u7531\u6784\u9020\u65f6\u7684frame_type\u6307\u5b9a min_bars int \u56de\u6d4b\u65f6\u5fc5\u8981\u7684bars\u7684\u6700\u5c0f\u503c Source code in omicron/strategy/base.py async def backtest ( self , stop_on_error : bool = True , ** kwargs ): \"\"\"\u6267\u884c\u56de\u6d4b Args: stop_on_error: \u5982\u679c\u4e3aTrue\uff0c\u5219\u53d1\u751f\u5f02\u5e38\u65f6\uff0c\u5c06\u505c\u6b62\u56de\u6d4b\u3002\u5426\u5219\u5ffd\u7565\u9519\u8bef\uff0c\u7ee7\u7eed\u6267\u884c\u3002 Keyword Args: portfolio Dict[str, BarsArray]: \u4ee3\u7801\u5217\u8868\u3002\u5728\u8be5\u5217\u8868\u4e2d\u7684\u54c1\u79cd\uff0c\u5c06\u5728\u56de\u6d4b\u4e4b\u524d\u81ea\u52a8\u9884\u53d6\u884c\u60c5\u6570\u636e\uff0c\u5e76\u5728\u8c03\u7528predict\u65f6\uff0c\u4f20\u5165\u622a\u6b62\u5230\u5f53\u524dframe\u7684\uff0c\u957f\u5ea6\u4e3an\u7684\u884c\u60c5\u6570\u636e\u3002\u884c\u60c5\u5468\u671f\u7531\u6784\u9020\u65f6\u7684frame_type\u6307\u5b9a min_bars int: \u56de\u6d4b\u65f6\u5fc5\u8981\u7684bars\u7684\u6700\u5c0f\u503c \"\"\" portfolio : List [ str ] = kwargs . get ( \"portfolio\" ) # type: ignore n = kwargs . get ( \"min_bars\" , 0 ) await self . _cache_bars_for_backtest ( portfolio , n ) self . bs . cursor = n converter = ( tf . int2date if self . _frame_type in tf . day_level_frames else tf . int2time ) for i , frame in enumerate ( tf . get_frames ( self . bs . start , self . bs . end , self . _frame_type ) # type: ignore ): barss = self . _next () logger . debug ( \" %s th iteration\" , i , date = converter ( frame )) try : await self . predict ( converter ( frame ), self . _frame_type , i , barss = barss , ** kwargs # type: ignore ) except Exception as e : logger . exception ( e ) if stop_on_error : raise e self . broker . stop_backtest () self . bills = self . broker . bills () self . metrics = self . broker . metrics ( baseline = self . 
_baseline )","title":"backtest()"},{"location":"api/strategy/#omicron.strategy.base.BaseStrategy.buy","text":"\u4e70\u5165\u80a1\u7968 Parameters: Name Type Description Default sec str \u8bc1\u5238\u4ee3\u7801 required price Optional[float] \u59d4\u4e70\u4ef7\u3002\u5982\u679c\u4e3aNone\uff0c\u5219\u81ea\u52a8\u8f6c\u5e02\u4ef7\u4e70\u5165\u3002 None vol Optional[int] \u59d4\u4e70\u80a1\u6570\u3002\u8bf7\u81ea\u884c\u4fdd\u8bc1\u4e3a100\u7684\u6574\u6570\u3002\u5982\u679c\u4e3aNone, \u5219money\u5fc5\u987b\u4f20\u5165\u3002 None money Optional[float] \u59d4\u4e70\u91d1\u989d\u3002\u5982\u679c\u540c\u65f6\u4f20\u5165\u4e86vol\uff0c\u5219\u6b64\u53c2\u6570\u81ea\u52a8\u5ffd\u7565 None order_time Optional[datetime.datetime] \u4ec5\u5728\u56de\u6d4b\u6a21\u5f0f\u4e0b\u9700\u8981\u63d0\u4f9b\u3002\u5b9e\u76d8\u6a21\u5f0f\u4e0b\uff0c\u6b64\u53c2\u6570\u81ea\u52a8\u88ab\u5ffd\u7565 None Returns: Type Description Dict \u89c1traderclient\u4e2d\u7684 buy \u65b9\u6cd5\u3002 Source code in omicron/strategy/base.py async def buy ( self , sec : str , price : Optional [ float ] = None , vol : Optional [ int ] = None , money : Optional [ float ] = None , order_time : Optional [ datetime . datetime ] = None , ) -> Dict : \"\"\"\u4e70\u5165\u80a1\u7968 Args: sec: \u8bc1\u5238\u4ee3\u7801 price: \u59d4\u4e70\u4ef7\u3002\u5982\u679c\u4e3aNone\uff0c\u5219\u81ea\u52a8\u8f6c\u5e02\u4ef7\u4e70\u5165\u3002 vol: \u59d4\u4e70\u80a1\u6570\u3002\u8bf7\u81ea\u884c\u4fdd\u8bc1\u4e3a100\u7684\u6574\u6570\u3002\u5982\u679c\u4e3aNone, \u5219money\u5fc5\u987b\u4f20\u5165\u3002 money: \u59d4\u4e70\u91d1\u989d\u3002\u5982\u679c\u540c\u65f6\u4f20\u5165\u4e86vol\uff0c\u5219\u6b64\u53c2\u6570\u81ea\u52a8\u5ffd\u7565 order_time: \u4ec5\u5728\u56de\u6d4b\u6a21\u5f0f\u4e0b\u9700\u8981\u63d0\u4f9b\u3002\u5b9e\u76d8\u6a21\u5f0f\u4e0b\uff0c\u6b64\u53c2\u6570\u81ea\u52a8\u88ab\u5ffd\u7565 Returns: \u89c1traderclient\u4e2d\u7684`buy`\u65b9\u6cd5\u3002 \"\"\" logger . info ( \"buy order: %s , %s , %s , %s \" , sec , f \" { price : .2f } \" if price is not None else None , f \" { vol : .0f } \" if vol is not None else None , f \" { money : .0f } \" if money is not None else None , date = order_time , ) if vol is None : if money is None : raise ValueError ( \"parameter `mnoey` must be presented!\" ) return await self . broker . buy_by_money ( sec , money , price , order_time = order_time ) elif price is None : return self . broker . market_buy ( sec , vol , order_time = order_time ) else : return self . broker . buy ( sec , price , vol , order_time = order_time )","title":"buy()"},{"location":"api/strategy/#omicron.strategy.base.BaseStrategy.peek","text":"\u5141\u8bb8\u7b56\u7565\u5077\u770b\u672a\u6765\u6570\u636e \u53ef\u7528\u4ee5\u56e0\u5b50\u68c0\u9a8c\u573a\u666f\u3002\u8981\u6c42\u6570\u636e\u672c\u8eab\u5df2\u7f13\u5b58\u3002\u5426\u5219\u8bf7\u7528Stock.get_bars\u7b49\u65b9\u6cd5\u83b7\u53d6\u3002 Source code in omicron/strategy/base.py async def peek ( self , code : str , n : int ): \"\"\"\u5141\u8bb8\u7b56\u7565\u5077\u770b\u672a\u6765\u6570\u636e \u53ef\u7528\u4ee5\u56e0\u5b50\u68c0\u9a8c\u573a\u666f\u3002\u8981\u6c42\u6570\u636e\u672c\u8eab\u5df2\u7f13\u5b58\u3002\u5426\u5219\u8bf7\u7528Stock.get_bars\u7b49\u65b9\u6cd5\u83b7\u53d6\u3002 \"\"\" if self . bs is None or self . bs . barss is None : raise ValueError ( \"data is not cached\" ) if code in self . bs . barss : if self . bs . cursor + n + 1 < len ( self . bs . barss [ code ]): return Stock . qfq ( self . bs . barss [ code ][ self . bs . cursor : self . bs . 
cursor + n ] ) else : raise ValueError ( \"data is not cached\" )","title":"peek()"},{"location":"api/strategy/#omicron.strategy.base.BaseStrategy.plot_metrics","text":"\u7b56\u7565\u56de\u6d4b\u62a5\u544a Parameters: Name Type Description Default indicator Union[pandas.core.frame.DataFrame, List[Tuple]] \u56de\u6d4b\u65f6\u4f7f\u7528\u7684\u6307\u6807\u3002\u5982\u679c\u5b58\u5728\uff0c\u5c06\u53e0\u52a0\u5230\u7b56\u7565\u56de\u6d4b\u56fe\u4e0a\u3002\u5b83\u5e94\u8be5\u662f\u4e00\u4e2a\u4ee5\u65e5\u671f\u4e3a\u7d22\u5f15\uff0c\u6307\u6807\u5217\u540d\u4e3a\"value\"\u7684DataFrame None Source code in omicron/strategy/base.py async def plot_metrics ( self , indicator : Union [ pd . DataFrame , List [ Tuple ], None ] = None ): \"\"\"\u7b56\u7565\u56de\u6d4b\u62a5\u544a Args: indicator: \u56de\u6d4b\u65f6\u4f7f\u7528\u7684\u6307\u6807\u3002\u5982\u679c\u5b58\u5728\uff0c\u5c06\u53e0\u52a0\u5230\u7b56\u7565\u56de\u6d4b\u56fe\u4e0a\u3002\u5b83\u5e94\u8be5\u662f\u4e00\u4e2a\u4ee5\u65e5\u671f\u4e3a\u7d22\u5f15\uff0c\u6307\u6807\u5217\u540d\u4e3a\"value\"\u7684DataFrame \"\"\" if self . bills is None or self . metrics is None : raise ValueError ( \"Please run `start_backtest` first.\" ) if isinstance ( indicator , list ): assert len ( indicator [ 0 ]) == 2 indicator = pd . DataFrame ( indicator , columns = [ \"date\" , \"value\" ]) indicator . set_index ( \"date\" , inplace = True ) if self . _baseline is not None : mg = MetricsGraph ( self . bills , self . metrics , baseline_code = self . _baseline , indicator = indicator , ) else : mg = MetricsGraph ( self . bills , self . metrics , indicator = indicator ) await mg . plot ()","title":"plot_metrics()"},{"location":"api/strategy/#omicron.strategy.base.BaseStrategy.positions","text":"\u8fd4\u56de\u5f53\u524d\u6301\u4ed3 Source code in omicron/strategy/base.py def positions ( self , dt : Optional [ datetime . date ] = None ): \"\"\"\u8fd4\u56de\u5f53\u524d\u6301\u4ed3\"\"\" return self . broker . 
positions ( dt )","title":"positions()"},{"location":"api/strategy/#omicron.strategy.base.BaseStrategy.predict","text":"\u7b56\u7565\u8bc4\u4f30\u51fd\u6570\u3002\u5728\u6b64\u51fd\u6570\u4e2d\u5b9e\u73b0\u4ea4\u6613\u4fe1\u53f7\u68c0\u6d4b\u548c\u5904\u7406\u3002 Parameters: Name Type Description Default frame Union[datetime.date, datetime.datetime] \u5f53\u524d\u65f6\u95f4\u5e27 required frame_type FrameType \u5904\u7406\u7684\u6570\u636e\u4e3b\u5468\u671f required i int \u5f53\u524d\u65f6\u95f4\u79bb\u56de\u6d4b\u8d77\u59cb\u7684\u5355\u4f4d\u6570 required barss Dict[str, numpy.ndarray[Any, numpy.dtype[dtype([('frame', ' Union [ List , Dict ]: \"\"\"\u5356\u51fa\u80a1\u7968 Args: sec: \u8bc1\u5238\u4ee3\u7801 price: \u59d4\u5356\u4ef7\uff0c\u5982\u679c\u672a\u63d0\u4f9b\uff0c\u5219\u8f6c\u4e3a\u5e02\u4ef7\u5355 vol: \u59d4\u5356\u80a1\u6570\u3002\u5982\u679c\u4e3aNone\uff0c\u5219percent\u5fc5\u987b\u4f20\u5165 percent: \u5356\u51fa\u4e00\u5b9a\u6bd4\u4f8b\u7684\u6301\u4ed3\uff0c\u53d6\u503c\u4ecb\u4e8e0\u4e0e1\u4e4b\u95f4\u3002\u5982\u679c\u4e0evol\u540c\u65f6\u63d0\u4f9b\uff0c\u6b64\u53c2\u6570\u5c06\u88ab\u5ffd\u7565\u3002\u8bf7\u81ea\u884c\u4fdd\u8bc1\u6309\u6bd4\u4f8b\u6362\u7b97\u540e\u7684\u5356\u51fa\u6570\u636e\u662f\u7b26\u5408\u8981\u6c42\u7684\uff08\u6bd4\u5982\u4e0d\u4e3a100\u7684\u500d\u6570\uff0c\u4f46\u6709\u4e9b\u60c5\u51b5\u4e0b\u8fd9\u662f\u5141\u8bb8\u7684\uff0c\u6240\u4ee5\u7a0b\u5e8f\u8fd9\u91cc\u65e0\u6cd5\u5e2e\u4f60\u5224\u65ad\uff09 order_time: \u4ec5\u5728\u56de\u6d4b\u6a21\u5f0f\u4e0b\u9700\u8981\u63d0\u4f9b\u3002\u5b9e\u76d8\u6a21\u5f0f\u4e0b\uff0c\u6b64\u53c2\u6570\u81ea\u52a8\u88ab\u5ffd\u7565 Returns: Union[List, Dict]: \u6210\u4ea4\u8fd4\u56de\uff0c\u8be6\u89c1traderclient\u4e2d\u7684`buy`\u65b9\u6cd5\uff0ctrade server\u53ea\u8fd4\u56de\u4e00\u4e2a\u59d4\u6258\u5355\u4fe1\u606f \"\"\" logger . info ( \"sell order: %s , %s , %s , %s \" , sec , f \" { price : .2f } \" if price is not None else None , f \" { vol : .0f } \" if vol is not None else None , f \" { percent : .2% } \" if percent is not None else None , date = order_time , ) if vol is None and percent is None : raise ValueError ( \"either vol or percent must be presented\" ) if vol is None : if price is None : price = await self . broker . _get_market_sell_price ( sec , order_time = order_time ) # there's no market_sell_percent API in traderclient return self . broker . sell_percent ( sec , price , percent , order_time = order_time ) # type: ignore else : if price is None : return self . broker . market_sell ( sec , vol , order_time = order_time ) else : return self . broker . sell ( sec , price , vol , order_time = order_time )","title":"sell()"},{"location":"api/strategy/#omicron.strategy.sma","text":"","title":"sma"},{"location":"api/strategy/#omicron.strategy.sma.SMAStrategy","text":"Source code in omicron/strategy/sma.py class SMAStrategy ( BaseStrategy ): def __init__ ( self , sec : str , n_short : int = 5 , n_long : int = 10 , * args , ** kwargs ): self . _sec = sec self . _n_short = n_short self . _n_long = n_long self . indicators = [] super () . __init__ ( * args , ** kwargs ) async def predict ( self , frame : Frame , frame_type : FrameType , i : int , barss , ** kwargs ): if barss is None : raise ValueError ( \"please specify `portfolio` and `min_bars`\" ) bars : Union [ BarsArray , None ] = barss . get ( self . _sec ) if bars is None : raise ValueError ( f \" { self . _sec } not found in `portfolio`\" ) ma_short = np . mean ( bars [ \"close\" ][ - self . _n_short :]) ma_long = np . 
mean ( bars [ \"close\" ][ - self . _n_long :]) if ma_short > ma_long : self . indicators . append (( frame , 1 )) if self . cash >= 100 * bars [ \"close\" ][ - 1 ]: await self . buy ( self . _sec , money = self . cash , order_time = tf . combine_time ( frame , 14 , 55 ), ) elif ma_short < ma_long : self . indicators . append (( frame , - 1 )) if self . available_shares ( self . _sec , frame ) > 0 : await self . sell ( self . _sec , percent = 1.0 , order_time = tf . combine_time ( frame , 14 , 55 ) )","title":"SMAStrategy"},{"location":"api/strategy/#omicron.strategy.sma.SMAStrategy.predict","text":"\u7b56\u7565\u8bc4\u4f30\u51fd\u6570\u3002\u5728\u6b64\u51fd\u6570\u4e2d\u5b9e\u73b0\u4ea4\u6613\u4fe1\u53f7\u68c0\u6d4b\u548c\u5904\u7406\u3002 Parameters: Name Type Description Default frame Union[datetime.date, datetime.datetime] \u5f53\u524d\u65f6\u95f4\u5e27 required frame_type FrameType \u5904\u7406\u7684\u6570\u636e\u4e3b\u5468\u671f required i int \u5f53\u524d\u65f6\u95f4\u79bb\u56de\u6d4b\u8d77\u59cb\u7684\u5355\u4f4d\u6570 required barss \u5982\u679c\u8c03\u7528 backtest \u65f6\u4f20\u5165\u4e86 portfolio \u53ca min_bars \u53c2\u6570\uff0c\u5219 backtest \u5c06\u4f1a\u5728\u56de\u6d4b\u4e4b\u524d\uff0c\u9884\u53d6\u4ece[start - min_bars * frame_type, end]\u95f4\u7684portfolio\u884c\u60c5\u6570\u636e\uff0c\u5e76\u5728\u6bcf\u6b21\u8c03\u7528 predict \u65b9\u6cd5\u65f6\uff0c\u901a\u8fc7 barss \u53c2\u6570\uff0c\u5c06[start - min_bars * frame_type, start + i * frame_type]\u95f4\u7684\u6570\u636e\u4f20\u7ed9 predict \u65b9\u6cd5\u3002\u4f20\u5165\u7684\u6570\u636e\u5df2\u8fdb\u884c\u524d\u590d\u6743\u3002 required Keyword Args: \u5728 backtest \u65b9\u6cd5\u4e2d\u7684\u4f20\u5165\u7684kwargs\u53c2\u6570\u5c06\u88ab\u900f\u4f20\u5230\u6b64\u65b9\u6cd5\u4e2d\u3002 Source code in omicron/strategy/sma.py async def predict ( self , frame : Frame , frame_type : FrameType , i : int , barss , ** kwargs ): if barss is None : raise ValueError ( \"please specify `portfolio` and `min_bars`\" ) bars : Union [ BarsArray , None ] = barss . get ( self . _sec ) if bars is None : raise ValueError ( f \" { self . _sec } not found in `portfolio`\" ) ma_short = np . mean ( bars [ \"close\" ][ - self . _n_short :]) ma_long = np . mean ( bars [ \"close\" ][ - self . _n_long :]) if ma_short > ma_long : self . indicators . append (( frame , 1 )) if self . cash >= 100 * bars [ \"close\" ][ - 1 ]: await self . buy ( self . _sec , money = self . cash , order_time = tf . combine_time ( frame , 14 , 55 ), ) elif ma_short < ma_long : self . indicators . append (( frame , - 1 )) if self . available_shares ( self . _sec , frame ) > 0 : await self . sell ( self . _sec , percent = 1.0 , order_time = tf . combine_time ( frame , 14 , 55 ) )","title":"predict()"},{"location":"api/talib/","text":"core \u00b6 angle ( ts , threshold = 0.01 , loss_func = 're' ) \u00b6 \u6c42\u65f6\u95f4\u5e8f\u5217 ts \u62df\u5408\u76f4\u7ebf\u76f8\u5bf9\u4e8e x \u8f74\u7684\u5939\u89d2\u7684\u4f59\u5f26\u503c \u672c\u51fd\u6570\u53ef\u4ee5\u7528\u6765\u5224\u65ad\u65f6\u95f4\u5e8f\u5217\u7684\u589e\u957f\u8d8b\u52bf\u3002\u5f53 angle \u5904\u4e8e[-1, 0]\u65f6\uff0c\u8d8a\u9760\u8fd10\uff0c\u4e0b\u964d\u8d8a\u5feb\uff1b\u5f53 angle \u5904\u4e8e[0, 1]\u65f6\uff0c\u8d8a\u63a5\u8fd10\uff0c\u4e0a\u5347\u8d8a\u5feb\u3002 \u5982\u679c ts \u65e0\u6cd5\u5f88\u597d\u5730\u62df\u5408\u4e3a\u76f4\u7ebf\uff0c\u5219\u8fd4\u56de[float, None] Examples: >>> ts = np . 
array ([ i for i in range ( 5 )]) >>> round ( angle ( ts )[ 1 ], 3 ) # degree: 45, rad: pi/2 0.707 >>> ts = np . array ([ np . sqrt ( 3 ) / 3 * i for i in range ( 10 )]) >>> round ( angle ( ts )[ 1 ], 3 ) # degree: 30, rad: pi/6 0.866 >>> ts = np . array ([ - np . sqrt ( 3 ) / 3 * i for i in range ( 7 )]) >>> round ( angle ( ts )[ 1 ], 3 ) # degree: 150, rad: 5*pi/6 - 0.866 Parameters: Name Type Description Default ts required Returns: Type Description Tuple[float, float] \u8fd4\u56de (error, consine(theta))\uff0c\u5373\u62df\u5408\u8bef\u5dee\u548c\u5939\u89d2\u4f59\u5f26\u503c\u3002 Source code in omicron/talib/core.py def angle ( ts , threshold = 0.01 , loss_func = \"re\" ) -> Tuple [ float , float ]: \"\"\"\u6c42\u65f6\u95f4\u5e8f\u5217`ts`\u62df\u5408\u76f4\u7ebf\u76f8\u5bf9\u4e8e`x`\u8f74\u7684\u5939\u89d2\u7684\u4f59\u5f26\u503c \u672c\u51fd\u6570\u53ef\u4ee5\u7528\u6765\u5224\u65ad\u65f6\u95f4\u5e8f\u5217\u7684\u589e\u957f\u8d8b\u52bf\u3002\u5f53`angle`\u5904\u4e8e[-1, 0]\u65f6\uff0c\u8d8a\u9760\u8fd10\uff0c\u4e0b\u964d\u8d8a\u5feb\uff1b\u5f53`angle` \u5904\u4e8e[0, 1]\u65f6\uff0c\u8d8a\u63a5\u8fd10\uff0c\u4e0a\u5347\u8d8a\u5feb\u3002 \u5982\u679c`ts`\u65e0\u6cd5\u5f88\u597d\u5730\u62df\u5408\u4e3a\u76f4\u7ebf\uff0c\u5219\u8fd4\u56de[float, None] Examples: >>> ts = np.array([ i for i in range(5)]) >>> round(angle(ts)[1], 3) # degree: 45, rad: pi/2 0.707 >>> ts = np.array([ np.sqrt(3) / 3 * i for i in range(10)]) >>> round(angle(ts)[1],3) # degree: 30, rad: pi/6 0.866 >>> ts = np.array([ -np.sqrt(3) / 3 * i for i in range(7)]) >>> round(angle(ts)[1], 3) # degree: 150, rad: 5*pi/6 -0.866 Args: ts: Returns: \u8fd4\u56de (error, consine(theta))\uff0c\u5373\u62df\u5408\u8bef\u5dee\u548c\u5939\u89d2\u4f59\u5f26\u503c\u3002 \"\"\" err , ( a , b ) = polyfit ( ts , deg = 1 , loss_func = loss_func ) if err > threshold : return ( err , None ) v = np . array ([ 1 , a + b ]) vx = np . array ([ 1 , 0 ]) return err , copysign ( np . dot ( v , vx ) / ( norm ( v ) * norm ( vx )), a ) clustering ( numbers , n ) \u00b6 \u5c06\u6570\u7ec4 numbers \u5212\u5206\u4e3a n \u4e2a\u7c07 \u8fd4\u56de\u503c\u4e3a\u4e00\u4e2aList, \u6bcf\u4e00\u4e2a\u5143\u7d20\u4e3a\u4e00\u4e2a\u5217\u8868\uff0c\u5206\u522b\u4e3a\u7c07\u7684\u8d77\u59cb\u70b9\u548c\u957f\u5ea6\u3002 Examples: >>> numbers = np . array ([ 1 , 1 , 1 , 2 , 4 , 6 , 8 , 7 , 4 , 5 , 6 ]) >>> clustering ( numbers , 2 ) [( 0 , 4 ), ( 4 , 7 )] Returns: Type Description List[Tuple[int, int]] \u5212\u5206\u540e\u7684\u7c07\u5217\u8868\u3002 Source code in omicron/talib/core.py def clustering ( numbers : np . ndarray , n : int ) -> List [ Tuple [ int , int ]]: \"\"\"\u5c06\u6570\u7ec4`numbers`\u5212\u5206\u4e3a`n`\u4e2a\u7c07 \u8fd4\u56de\u503c\u4e3a\u4e00\u4e2aList, \u6bcf\u4e00\u4e2a\u5143\u7d20\u4e3a\u4e00\u4e2a\u5217\u8868\uff0c\u5206\u522b\u4e3a\u7c07\u7684\u8d77\u59cb\u70b9\u548c\u957f\u5ea6\u3002 Examples: >>> numbers = np.array([1,1,1,2,4,6,8,7,4,5,6]) >>> clustering(numbers, 2) [(0, 4), (4, 7)] Returns: \u5212\u5206\u540e\u7684\u7c07\u5217\u8868\u3002 \"\"\" result = ckwrap . cksegs ( numbers , n ) clusters = [] for pos , size in zip ( result . centers , result . sizes ): clusters . append (( int ( pos - size // 2 - 1 ), int ( size ))) return clusters exp_moving_average ( values , window ) \u00b6 Numpy implementation of EMA Source code in omicron/talib/core.py def exp_moving_average ( values , window ): \"\"\"Numpy implementation of EMA\"\"\" weights = np . exp ( np . linspace ( - 1.0 , 0.0 , window )) weights /= weights . sum () a = np . 
convolve ( values , weights , mode = \"full\" )[: len ( values )] a [: window ] = a [ window ] return a mean_absolute_error ( y , y_hat ) \u00b6 \u8fd4\u56de\u9884\u6d4b\u5e8f\u5217\u76f8\u5bf9\u4e8e\u771f\u503c\u5e8f\u5217\u7684\u5e73\u5747\u7edd\u5bf9\u503c\u5dee \u4e24\u4e2a\u5e8f\u5217\u5e94\u8be5\u5177\u6709\u76f8\u540c\u7684\u957f\u5ea6\u3002\u5982\u679c\u5b58\u5728nan\uff0c\u5219nan\u7684\u503c\u4e0d\u8ba1\u5165\u5e73\u5747\u503c\u3002 Examples: >>> y = np . arange ( 5 ) >>> y_hat = np . arange ( 5 ) >>> y_hat [ 4 ] = 0 >>> mean_absolute_error ( y , y ) 0.0 >>> mean_absolute_error ( y , y_hat ) 0.8 Parameters: Name Type Description Default y np.array \u771f\u503c\u5e8f\u5217 required y_hat \u6bd4\u8f83\u5e8f\u5217 required Returns: Type Description float \u5e73\u5747\u7edd\u5bf9\u503c\u5dee Source code in omicron/talib/core.py def mean_absolute_error ( y : np . array , y_hat : np . array ) -> float : \"\"\"\u8fd4\u56de\u9884\u6d4b\u5e8f\u5217\u76f8\u5bf9\u4e8e\u771f\u503c\u5e8f\u5217\u7684\u5e73\u5747\u7edd\u5bf9\u503c\u5dee \u4e24\u4e2a\u5e8f\u5217\u5e94\u8be5\u5177\u6709\u76f8\u540c\u7684\u957f\u5ea6\u3002\u5982\u679c\u5b58\u5728nan\uff0c\u5219nan\u7684\u503c\u4e0d\u8ba1\u5165\u5e73\u5747\u503c\u3002 Examples: >>> y = np.arange(5) >>> y_hat = np.arange(5) >>> y_hat[4] = 0 >>> mean_absolute_error(y, y) 0.0 >>> mean_absolute_error(y, y_hat) 0.8 Args: y (np.array): \u771f\u503c\u5e8f\u5217 y_hat: \u6bd4\u8f83\u5e8f\u5217 Returns: float: \u5e73\u5747\u7edd\u5bf9\u503c\u5dee \"\"\" return nanmean ( np . abs ( y - y_hat )) moving_average ( ts , win , padding = True ) \u00b6 \u751f\u6210ts\u5e8f\u5217\u7684\u79fb\u52a8\u5e73\u5747\u503c Examples: >>> ts = np . arange ( 7 ) >>> moving_average ( ts , 5 ) array ([ nan , nan , nan , nan , 2. , 3. , 4. ]) Parameters: Name Type Description Default ts Sequence the input array required win int the window size required padding if True, then the return will be equal length as input, padding with np.NaN at the beginning True Returns: Type Description ndarray The moving mean of the input array along the specified axis. The output has the same shape as the input. Source code in omicron/talib/core.py def moving_average ( ts : Sequence , win : int , padding = True ) -> np . ndarray : \"\"\"\u751f\u6210ts\u5e8f\u5217\u7684\u79fb\u52a8\u5e73\u5747\u503c Examples: >>> ts = np.arange(7) >>> moving_average(ts, 5) array([nan, nan, nan, nan, 2., 3., 4.]) Args: ts (Sequence): the input array win (int): the window size padding: if True, then the return will be equal length as input, padding with np.NaN at the beginning Returns: The moving mean of the input array along the specified axis. The output has the same shape as the input. \"\"\" ma = move_mean ( ts , win ) if padding : return ma else : return ma [ win - 1 :] normalize ( X , scaler = 'maxabs' ) \u00b6 \u5bf9\u6570\u636e\u8fdb\u884c\u89c4\u8303\u5316\u5904\u7406\u3002 \u5982\u679cscaler\u4e3amaxabs\uff0c\u5219X\u7684\u5404\u5143\u7d20\u88ab\u538b\u7f29\u5230[-1,1]\u4e4b\u95f4 \u5982\u679cscaler\u4e3aunit_vector\uff0c\u5219\u5c06X\u7684\u5404\u5143\u7d20\u538b\u7f29\u5230\u5355\u4f4d\u8303\u6570 \u5982\u679cscaler\u4e3aminmax,\u5219X\u7684\u5404\u5143\u7d20\u88ab\u538b\u7f29\u5230[0,1]\u4e4b\u95f4 \u5982\u679cscaler\u4e3astandard,\u5219X\u7684\u5404\u5143\u7d20\u88ab\u538b\u7f29\u5230\u5355\u4f4d\u65b9\u5dee\u4e4b\u95f4\uff0c\u4e14\u5747\u503c\u4e3a\u96f6\u3002 \u53c2\u8003 sklearn Examples: >>> X = [[ 1. , - 1. , 2. ], ... [ 2. , 0. , 0. ], ... [ 0. , 1. , - 1. 
]] >>> expected = [[ 0.4082 , - 0.4082 , 0.8165 ], ... [ 1. , 0. , 0. ], ... [ 0. , 0.7071 , - 0.7071 ]] >>> X_hat = normalize ( X , scaler = 'unit_vector' ) >>> np . testing . assert_array_almost_equal ( expected , X_hat , decimal = 4 ) >>> expected = [[ 0.5 , - 1. , 1. ], ... [ 1. , 0. , 0. ], ... [ 0. , 1. , - 0.5 ]] >>> X_hat = normalize ( X , scaler = 'maxabs' ) >>> np . testing . assert_array_almost_equal ( expected , X_hat , decimal = 2 ) >>> expected = [[ 0.5 , 0. , 1. ], ... [ 1. , 0.5 , 0.33333333 ], ... [ 0. , 1. , 0. ]] >>> X_hat = normalize ( X , scaler = 'minmax' ) >>> np . testing . assert_array_almost_equal ( expected , X_hat , decimal = 3 ) >>> X = [[ 0 , 0 ], ... [ 0 , 0 ], ... [ 1 , 1 ], ... [ 1 , 1 ]] >>> expected = [[ - 1. , - 1. ], ... [ - 1. , - 1. ], ... [ 1. , 1. ], ... [ 1. , 1. ]] >>> X_hat = normalize ( X , scaler = 'standard' ) >>> np . testing . assert_array_almost_equal ( expected , X_hat , decimal = 3 ) Parameters: Name Type Description Default X 2D array required scaler str [description]. Defaults to 'maxabs_scale'. 'maxabs' Source code in omicron/talib/core.py def normalize ( X , scaler = \"maxabs\" ): \"\"\"\u5bf9\u6570\u636e\u8fdb\u884c\u89c4\u8303\u5316\u5904\u7406\u3002 \u5982\u679cscaler\u4e3amaxabs\uff0c\u5219X\u7684\u5404\u5143\u7d20\u88ab\u538b\u7f29\u5230[-1,1]\u4e4b\u95f4 \u5982\u679cscaler\u4e3aunit_vector\uff0c\u5219\u5c06X\u7684\u5404\u5143\u7d20\u538b\u7f29\u5230\u5355\u4f4d\u8303\u6570 \u5982\u679cscaler\u4e3aminmax,\u5219X\u7684\u5404\u5143\u7d20\u88ab\u538b\u7f29\u5230[0,1]\u4e4b\u95f4 \u5982\u679cscaler\u4e3astandard,\u5219X\u7684\u5404\u5143\u7d20\u88ab\u538b\u7f29\u5230\u5355\u4f4d\u65b9\u5dee\u4e4b\u95f4\uff0c\u4e14\u5747\u503c\u4e3a\u96f6\u3002 \u53c2\u8003 [sklearn] [sklearn]: https://scikit-learn.org/stable/auto_examples/preprocessing/plot_all_scaling.html#results Examples: >>> X = [[ 1., -1., 2.], ... [ 2., 0., 0.], ... [ 0., 1., -1.]] >>> expected = [[ 0.4082, -0.4082, 0.8165], ... [ 1., 0., 0.], ... [ 0., 0.7071, -0.7071]] >>> X_hat = normalize(X, scaler='unit_vector') >>> np.testing.assert_array_almost_equal(expected, X_hat, decimal=4) >>> expected = [[0.5, -1., 1.], ... [1., 0., 0.], ... [0., 1., -0.5]] >>> X_hat = normalize(X, scaler='maxabs') >>> np.testing.assert_array_almost_equal(expected, X_hat, decimal = 2) >>> expected = [[0.5 , 0. , 1. ], ... [1. , 0.5 , 0.33333333], ... [0. , 1. , 0. ]] >>> X_hat = normalize(X, scaler='minmax') >>> np.testing.assert_array_almost_equal(expected, X_hat, decimal= 3) >>> X = [[0, 0], ... [0, 0], ... [1, 1], ... [1, 1]] >>> expected = [[-1., -1.], ... [-1., -1.], ... [ 1., 1.], ... [ 1., 1.]] >>> X_hat = normalize(X, scaler='standard') >>> np.testing.assert_array_almost_equal(expected, X_hat, decimal = 3) Args: X (2D array): scaler (str, optional): [description]. Defaults to 'maxabs_scale'. \"\"\" if scaler == \"maxabs\" : return MaxAbsScaler () . fit_transform ( X ) elif scaler == \"unit_vector\" : return sklearn . preprocessing . normalize ( X , norm = \"l2\" ) elif scaler == \"minmax\" : return minmax_scale ( X ) elif scaler == \"standard\" : return StandardScaler () . fit_transform ( X ) pct_error ( y , y_hat ) \u00b6 \u76f8\u5bf9\u4e8e\u5e8f\u5217\u7b97\u672f\u5747\u503c\u7684\u8bef\u5dee\u503c Examples: >>> y = np . arange ( 5 ) >>> y_hat = np . 
arange ( 5 ) >>> y_hat [ 4 ] = 0 >>> pct_error ( y , y_hat ) 0.4 Parameters: Name Type Description Default y np.array [description] required y_hat np.array [description] required Returns: Type Description float [description] Source code in omicron/talib/core.py def pct_error ( y : np . array , y_hat : np . array ) -> float : \"\"\"\u76f8\u5bf9\u4e8e\u5e8f\u5217\u7b97\u672f\u5747\u503c\u7684\u8bef\u5dee\u503c Examples: >>> y = np.arange(5) >>> y_hat = np.arange(5) >>> y_hat[4] = 0 >>> pct_error(y, y_hat) 0.4 Args: y (np.array): [description] y_hat (np.array): [description] Returns: float: [description] \"\"\" mae = mean_absolute_error ( y , y_hat ) return mae / nanmean ( np . abs ( y )) polyfit ( ts , deg = 2 , loss_func = 're' ) \u00b6 \u5bf9\u7ed9\u5b9a\u7684\u65f6\u95f4\u5e8f\u5217\u8fdb\u884c\u76f4\u7ebf/\u4e8c\u6b21\u66f2\u7ebf\u62df\u5408\u3002 \u4e8c\u6b21\u66f2\u7ebf\u53ef\u4ee5\u62df\u5408\u5230\u53cd\u751f\u53cd\u8f6c\u7684\u884c\u60c5\uff0c\u5982\u5706\u5f27\u5e95\u3001\u5706\u5f27\u9876\uff1b\u4e5f\u53ef\u4ee5\u62df\u5408\u5230\u4e0a\u8ff0\u8d8b\u52bf\u4e2d\u7684\u5355\u8fb9\u8d70\u52bf\uff0c\u5373\u5176\u4e2d\u4e00\u6bb5\u66f2\u7ebf\u3002\u5bf9\u4e8e\u5982\u957f\u671f\u5747\u7ebf\uff0c\u5728\u4e00\u6bb5\u65f6\u95f4\u5185\u8d70\u52bf\u53ef\u80fd\u5448\u73b0\u4e3a\u4e00\u6761\u76f4\u7ebf\uff0c\u6545\u4e5f\u53ef\u7528\u6b64\u51fd\u6570\u8fdb\u884c\u76f4\u7ebf\u62df\u5408\u3002 \u4e3a\u4fbf\u4e8e\u5728\u4e0d\u540c\u54c1\u79cd\u3001\u4e0d\u540c\u7684\u65f6\u95f4\u4e4b\u95f4\u5bf9\u8bef\u5dee\u3001\u7cfb\u6570\u8fdb\u884c\u6bd4\u8f83\uff0c\u8bf7\u4e8b\u5148\u5bf9ts\u8fdb\u884c\u5f52\u4e00\u5316\u3002 \u5982\u679c\u9047\u5230\u65e0\u6cd5\u62df\u5408\u7684\u60c5\u51b5\uff08\u5f02\u5e38\uff09\uff0c\u5c06\u8fd4\u56de\u4e00\u4e2a\u975e\u5e38\u5927\u7684\u8bef\u5dee\uff0c\u5e76\u5c06\u5176\u5b83\u9879\u7f6e\u4e3anp.nan Examples: >>> ts = [ i for i in range ( 5 )] >>> err , ( a , b ) = polyfit ( ts , deg = 1 ) >>> print ( round ( err , 3 ), round ( a , 1 )) 0.0 1.0 Parameters: Name Type Description Default ts Sequence \u5f85\u62df\u5408\u7684\u65f6\u95f4\u5e8f\u5217 required deg int \u5982\u679c\u8981\u8fdb\u884c\u76f4\u7ebf\u62df\u5408\uff0c\u53d61\uff1b\u4e8c\u6b21\u66f2\u7ebf\u62df\u5408\u53d62. 
Defaults to 2 2 loss_func str \u8bef\u5dee\u8ba1\u7b97\u65b9\u6cd5\uff0c\u53d6\u503c\u4e3a mae , rmse , mse \u6216 re \u3002Defaults to re (relative_error) 're' Returns: Type Description [Tuple] \u5982\u679c\u4e3a\u76f4\u7ebf\u62df\u5408\uff0c\u8fd4\u56de\u8bef\u5dee\uff0c(a,b)(\u4e00\u6b21\u9879\u7cfb\u6570\u548c\u5e38\u6570)\u3002\u5982\u679c\u4e3a\u4e8c\u6b21\u66f2\u7ebf\u62df\u5408\uff0c\u8fd4\u56de \u8bef\u5dee, (a,b,c)(\u4e8c\u6b21\u9879\u3001\u4e00\u6b21\u9879\u548c\u5e38\u91cf\uff09, (vert_x, vert_y)(\u9876\u70b9\u5904\u7684index\uff0c\u9876\u70b9\u503c) Source code in omicron/talib/core.py def polyfit ( ts : Sequence , deg : int = 2 , loss_func = \"re\" ) -> Tuple : \"\"\"\u5bf9\u7ed9\u5b9a\u7684\u65f6\u95f4\u5e8f\u5217\u8fdb\u884c\u76f4\u7ebf/\u4e8c\u6b21\u66f2\u7ebf\u62df\u5408\u3002 \u4e8c\u6b21\u66f2\u7ebf\u53ef\u4ee5\u62df\u5408\u5230\u53cd\u751f\u53cd\u8f6c\u7684\u884c\u60c5\uff0c\u5982\u5706\u5f27\u5e95\u3001\u5706\u5f27\u9876\uff1b\u4e5f\u53ef\u4ee5\u62df\u5408\u5230\u4e0a\u8ff0\u8d8b\u52bf\u4e2d\u7684\u5355\u8fb9\u8d70\u52bf\uff0c\u5373\u5176\u4e2d\u4e00\u6bb5\u66f2\u7ebf\u3002\u5bf9\u4e8e\u5982\u957f\u671f\u5747\u7ebf\uff0c\u5728\u4e00\u6bb5\u65f6\u95f4\u5185\u8d70\u52bf\u53ef\u80fd\u5448\u73b0\u4e3a\u4e00\u6761\u76f4\u7ebf\uff0c\u6545\u4e5f\u53ef\u7528\u6b64\u51fd\u6570\u8fdb\u884c\u76f4\u7ebf\u62df\u5408\u3002 \u4e3a\u4fbf\u4e8e\u5728\u4e0d\u540c\u54c1\u79cd\u3001\u4e0d\u540c\u7684\u65f6\u95f4\u4e4b\u95f4\u5bf9\u8bef\u5dee\u3001\u7cfb\u6570\u8fdb\u884c\u6bd4\u8f83\uff0c\u8bf7\u4e8b\u5148\u5bf9ts\u8fdb\u884c\u5f52\u4e00\u5316\u3002 \u5982\u679c\u9047\u5230\u65e0\u6cd5\u62df\u5408\u7684\u60c5\u51b5\uff08\u5f02\u5e38\uff09\uff0c\u5c06\u8fd4\u56de\u4e00\u4e2a\u975e\u5e38\u5927\u7684\u8bef\u5dee\uff0c\u5e76\u5c06\u5176\u5b83\u9879\u7f6e\u4e3anp.nan Examples: >>> ts = [i for i in range(5)] >>> err, (a, b) = polyfit(ts, deg=1) >>> print(round(err, 3), round(a, 1)) 0.0 1.0 Args: ts (Sequence): \u5f85\u62df\u5408\u7684\u65f6\u95f4\u5e8f\u5217 deg (int): \u5982\u679c\u8981\u8fdb\u884c\u76f4\u7ebf\u62df\u5408\uff0c\u53d61\uff1b\u4e8c\u6b21\u66f2\u7ebf\u62df\u5408\u53d62. Defaults to 2 loss_func (str): \u8bef\u5dee\u8ba1\u7b97\u65b9\u6cd5\uff0c\u53d6\u503c\u4e3a`mae`, `rmse`,`mse` \u6216`re`\u3002Defaults to `re` (relative_error) Returns: [Tuple]: \u5982\u679c\u4e3a\u76f4\u7ebf\u62df\u5408\uff0c\u8fd4\u56de\u8bef\u5dee\uff0c(a,b)(\u4e00\u6b21\u9879\u7cfb\u6570\u548c\u5e38\u6570)\u3002\u5982\u679c\u4e3a\u4e8c\u6b21\u66f2\u7ebf\u62df\u5408\uff0c\u8fd4\u56de \u8bef\u5dee, (a,b,c)(\u4e8c\u6b21\u9879\u3001\u4e00\u6b21\u9879\u548c\u5e38\u91cf\uff09, (vert_x, vert_y)(\u9876\u70b9\u5904\u7684index\uff0c\u9876\u70b9\u503c) \"\"\" if deg not in ( 1 , 2 ): raise ValueError ( \"deg must be 1 or 2\" ) try : if any ( np . isnan ( ts )): raise ValueError ( \"ts contains nan\" ) x = np . array ( list ( range ( len ( ts )))) z = np . polyfit ( x , ts , deg = deg ) p = np . poly1d ( z ) ts_hat = np . array ([ p ( xi ) for xi in x ]) if loss_func == \"mse\" : error = np . mean ( np . square ( ts - ts_hat )) elif loss_func == \"rmse\" : error = np . sqrt ( np . mean ( np . 
square ( ts - ts_hat ))) elif loss_func == \"mae\" : error = mean_absolute_error ( ts , ts_hat ) else : # defaults to relative error error = pct_error ( ts , ts_hat ) if deg == 2 : a , b , c = z [ 0 ], z [ 1 ], z [ 2 ] axis_x = - b / ( 2 * a ) if a != 0 : axis_y = ( 4 * a * c - b * b ) / ( 4 * a ) else : axis_y = None return error , z , ( axis_x , axis_y ) elif deg == 1 : return error , z except Exception : error = 1e9 if deg == 1 : return error , ( np . nan , np . nan ) else : return error , ( np . nan , np . nan , np . nan ), ( np . nan , np . nan ) slope ( ts , loss_func = 're' ) \u00b6 \u6c42ts\u8868\u793a\u7684\u76f4\u7ebf\uff08\u5982\u679c\u80fd\u62df\u5408\u6210\u76f4\u7ebf\u7684\u8bdd\uff09\u7684\u659c\u7387 Parameters: Name Type Description Default ts np.array [description] required loss_func str [description]. Defaults to 're'. 're' Source code in omicron/talib/core.py def slope ( ts : np . array , loss_func = \"re\" ): \"\"\"\u6c42ts\u8868\u793a\u7684\u76f4\u7ebf\uff08\u5982\u679c\u80fd\u62df\u5408\u6210\u76f4\u7ebf\u7684\u8bdd\uff09\u7684\u659c\u7387 Args: ts (np.array): [description] loss_func (str, optional): [description]. Defaults to 're'. \"\"\" err , ( a , b ) = polyfit ( ts , deg = 1 , loss_func = loss_func ) return err , a smooth ( ts , win , poly_order = 1 , mode = 'interp' ) \u00b6 \u5e73\u6ed1\u5e8f\u5217ts\uff0c\u4f7f\u7528\u7a97\u53e3\u5927\u5c0f\u4e3awin\u7684\u5e73\u6ed1\u6a21\u578b\uff0c\u9ed8\u8ba4\u4f7f\u7528\u7ebf\u6027\u6a21\u578b \u63d0\u4f9b\u672c\u51fd\u6570\u4e3b\u8981\u57fa\u4e8e\u8fd9\u6837\u7684\u8003\u8651\uff1a omicron\u7684\u4f7f\u7528\u8005\u53ef\u80fd\u5e76\u4e0d\u719f\u6089\u4fe1\u53f7\u5904\u7406\u7684\u6982\u5ff5\uff0c\u8fd9\u91cc\u76f8\u5f53\u4e8e\u63d0\u4f9b\u4e86\u76f8\u5173\u529f\u80fd\u7684\u4e00\u4e2a\u5165\u53e3\u3002 Parameters: Name Type Description Default ts np.array [description] required win int [description] required poly_order int [description]. Defaults to 1. 1 Source code in omicron/talib/core.py def smooth ( ts : np . array , win : int , poly_order = 1 , mode = \"interp\" ): \"\"\"\u5e73\u6ed1\u5e8f\u5217ts\uff0c\u4f7f\u7528\u7a97\u53e3\u5927\u5c0f\u4e3awin\u7684\u5e73\u6ed1\u6a21\u578b\uff0c\u9ed8\u8ba4\u4f7f\u7528\u7ebf\u6027\u6a21\u578b \u63d0\u4f9b\u672c\u51fd\u6570\u4e3b\u8981\u57fa\u4e8e\u8fd9\u6837\u7684\u8003\u8651\uff1a omicron\u7684\u4f7f\u7528\u8005\u53ef\u80fd\u5e76\u4e0d\u719f\u6089\u4fe1\u53f7\u5904\u7406\u7684\u6982\u5ff5\uff0c\u8fd9\u91cc\u76f8\u5f53\u4e8e\u63d0\u4f9b\u4e86\u76f8\u5173\u529f\u80fd\u7684\u4e00\u4e2a\u5165\u53e3\u3002 Args: ts (np.array): [description] win (int): [description] poly_order (int, optional): [description]. Defaults to 1. \"\"\" return savgol_filter ( ts , win , poly_order , mode = mode ) weighted_moving_average ( ts , win ) \u00b6 \u8ba1\u7b97\u52a0\u6743\u79fb\u52a8\u5e73\u5747 Parameters: Name Type Description Default ts np.array [description] required win int [description] required Returns: Type Description np.array [description] Source code in omicron/talib/core.py def weighted_moving_average ( ts : np . array , win : int ) -> np . array : \"\"\"\u8ba1\u7b97\u52a0\u6743\u79fb\u52a8\u5e73\u5747 Args: ts (np.array): [description] win (int): [description] Returns: np.array: [description] \"\"\" w = [ 2 * ( i + 1 ) / ( win * ( win + 1 )) for i in range ( win )] return np . convolve ( ts , w , \"valid\" ) morph \u00b6 \u5f62\u6001\u68c0\u6d4b\u76f8\u5173\u65b9\u6cd5 BreakoutFlag ( IntEnum ) \u00b6 An enumeration. 
Source code in omicron/talib/morph.py class BreakoutFlag ( IntEnum ): UP = 1 DOWN = - 1 NONE = 0 CrossFlag ( IntEnum ) \u00b6 An enumeration. Source code in omicron/talib/morph.py class CrossFlag ( IntEnum ): UPCROSS = 1 DOWNCROSS = - 1 NONE = 0 breakout ( ts , upthres = 0.01 , downthres =- 0.01 , confirm = 1 ) \u00b6 \u68c0\u6d4b\u65f6\u95f4\u5e8f\u5217\u662f\u5426\u7a81\u7834\u4e86\u538b\u529b\u7ebf\uff08\u6574\u7406\u7ebf\uff09 Parameters: Name Type Description Default ts np.ndarray \u65f6\u95f4\u5e8f\u5217 required upthres float \u8bf7\u53c2\u8003 peaks_and_valleys 0.01 downthres float \u8bf7\u53c2\u8003 peaks_and_valleys -0.01 confirm int \u7ecf\u8fc7\u591a\u5c11\u4e2abars\u540e\uff0c\u624d\u786e\u8ba4\u7a81\u7834\u3002\u9ed8\u8ba4\u4e3a1 1 Returns: Type Description BreakoutFlag \u5982\u679c\u4e0a\u5411\u7a81\u7834\u538b\u529b\u7ebf\uff0c\u8fd4\u56de1\uff0c\u5982\u679c\u5411\u4e0b\u7a81\u7834\u538b\u529b\u7ebf\uff0c\u8fd4\u56de-1\uff0c\u5426\u5219\u8fd4\u56de0 Source code in omicron/talib/morph.py def breakout ( ts : np . ndarray , upthres : float = 0.01 , downthres : float = - 0.01 , confirm : int = 1 ) -> BreakoutFlag : \"\"\"\u68c0\u6d4b\u65f6\u95f4\u5e8f\u5217\u662f\u5426\u7a81\u7834\u4e86\u538b\u529b\u7ebf\uff08\u6574\u7406\u7ebf\uff09 Args: ts (np.ndarray): \u65f6\u95f4\u5e8f\u5217 upthres (float, optional): \u8bf7\u53c2\u8003[peaks_and_valleys][omicron.talib.morph.peaks_and_valleys] downthres (float, optional): \u8bf7\u53c2\u8003[peaks_and_valleys][omicron.talib.morph.peaks_and_valleys] confirm (int, optional): \u7ecf\u8fc7\u591a\u5c11\u4e2abars\u540e\uff0c\u624d\u786e\u8ba4\u7a81\u7834\u3002\u9ed8\u8ba4\u4e3a1 Returns: \u5982\u679c\u4e0a\u5411\u7a81\u7834\u538b\u529b\u7ebf\uff0c\u8fd4\u56de1\uff0c\u5982\u679c\u5411\u4e0b\u7a81\u7834\u538b\u529b\u7ebf\uff0c\u8fd4\u56de-1\uff0c\u5426\u5219\u8fd4\u56de0 \"\"\" support , resist , _ = support_resist_lines ( ts [: - confirm ], upthres , downthres ) x0 = len ( ts ) - confirm - 1 x = list ( range ( len ( ts ) - confirm , len ( ts ))) if resist is not None : if np . all ( ts [ x ] > resist ( x )) and ts [ x0 ] <= resist ( x0 ): return BreakoutFlag . UP if support is not None : if np . all ( ts [ x ] < support ( x )) and ts [ x0 ] >= support ( x0 ): return BreakoutFlag . DOWN return BreakoutFlag . NONE cross ( f , g ) \u00b6 \u5224\u65ad\u5e8f\u5217f\u662f\u5426\u4e0eg\u76f8\u4ea4\u3002\u5982\u679c\u4e24\u4e2a\u5e8f\u5217\u6709\u4e14\u4ec5\u6709\u4e00\u4e2a\u4ea4\u70b9\uff0c\u5219\u8fd4\u56de1\u8868\u660ef\u4e0a\u4ea4g\uff1b-1\u8868\u660ef\u4e0b\u4ea4g \u672c\u65b9\u6cd5\u53ef\u7528\u4ee5\u5224\u65ad\u4e24\u6761\u5747\u7ebf\u662f\u5426\u76f8\u4ea4\u3002 Returns: Type Description CrossFlag (flag, index), \u5176\u4e2dflag\u53d6\u503c\u4e3a\uff1a 0 \u65e0\u6548 -1 f\u5411\u4e0b\u4ea4\u53c9g 1 f\u5411\u4e0a\u4ea4\u53c9g Source code in omicron/talib/morph.py def cross ( f : np . ndarray , g : np . ndarray ) -> CrossFlag : \"\"\"\u5224\u65ad\u5e8f\u5217f\u662f\u5426\u4e0eg\u76f8\u4ea4\u3002\u5982\u679c\u4e24\u4e2a\u5e8f\u5217\u6709\u4e14\u4ec5\u6709\u4e00\u4e2a\u4ea4\u70b9\uff0c\u5219\u8fd4\u56de1\u8868\u660ef\u4e0a\u4ea4g\uff1b-1\u8868\u660ef\u4e0b\u4ea4g \u672c\u65b9\u6cd5\u53ef\u7528\u4ee5\u5224\u65ad\u4e24\u6761\u5747\u7ebf\u662f\u5426\u76f8\u4ea4\u3002 returns: (flag, index), \u5176\u4e2dflag\u53d6\u503c\u4e3a\uff1a 0 \u65e0\u6548 -1 f\u5411\u4e0b\u4ea4\u53c9g 1 f\u5411\u4e0a\u4ea4\u53c9g \"\"\" indices = np . argwhere ( np . diff ( np . sign ( f - g ))) . flatten () if len ( indices ) == 0 : return CrossFlag . 
NONE , 0 # \u5982\u679c\u5b58\u5728\u4e00\u4e2a\u6216\u8005\u591a\u4e2a\u4ea4\u70b9\uff0c\u53d6\u6700\u540e\u4e00\u4e2a idx = indices [ - 1 ] if f [ idx ] < g [ idx ]: return CrossFlag . UPCROSS , idx elif f [ idx ] > g [ idx ]: return CrossFlag . DOWNCROSS , idx else : return CrossFlag ( np . sign ( g [ idx - 1 ] - f [ idx - 1 ])), idx energy_hump ( bars , thresh = 2 ) \u00b6 \u68c0\u6d4b bars \u4e2d\u662f\u5426\u5b58\u5728\u4e24\u6ce2\u4ee5\u4e0a\u91cf\u80fd\u5267\u70c8\u589e\u52a0\u7684\u60c5\u5f62\uff08\u80fd\u91cf\u9a7c\u5cf0\uff09\uff0c\u8fd4\u56de\u6700\u540e\u4e00\u6ce2\u8ddd\u73b0\u5728\u7684\u4f4d\u7f6e\u53ca\u533a\u95f4\u957f\u5ea6\u3002 \u6ce8\u610f\u5982\u679c\u6700\u540e\u4e00\u4e2a\u80fd\u91cf\u9a7c\u5cf0\u8ddd\u73b0\u5728\u8fc7\u8fdc\uff08\u6bd4\u5982\u8d85\u8fc710\u4e2abar),\u53ef\u80fd\u610f\u5473\u7740\u8d44\u91d1\u5df2\u7ecf\u9003\u79bb\uff0c\u80fd\u91cf\u5df2\u7ecf\u8017\u5c3d\u3002 Parameters: Name Type Description Default bars [('frame', ' Optional [ Tuple [ int , int ]]: \"\"\"\u68c0\u6d4b`bars`\u4e2d\u662f\u5426\u5b58\u5728\u4e24\u6ce2\u4ee5\u4e0a\u91cf\u80fd\u5267\u70c8\u589e\u52a0\u7684\u60c5\u5f62\uff08\u80fd\u91cf\u9a7c\u5cf0\uff09\uff0c\u8fd4\u56de\u6700\u540e\u4e00\u6ce2\u8ddd\u73b0\u5728\u7684\u4f4d\u7f6e\u53ca\u533a\u95f4\u957f\u5ea6\u3002 \u6ce8\u610f\u5982\u679c\u6700\u540e\u4e00\u4e2a\u80fd\u91cf\u9a7c\u5cf0\u8ddd\u73b0\u5728\u8fc7\u8fdc\uff08\u6bd4\u5982\u8d85\u8fc710\u4e2abar),\u53ef\u80fd\u610f\u5473\u7740\u8d44\u91d1\u5df2\u7ecf\u9003\u79bb\uff0c\u80fd\u91cf\u5df2\u7ecf\u8017\u5c3d\u3002 Args: bars: \u884c\u60c5\u6570\u636e thresh: \u6700\u540e\u4e00\u6ce2\u91cf\u5fc5\u987b\u5927\u4e8e20\u5929\u5747\u91cf\u7684\u500d\u6570\u3002 Returns: \u5982\u679c\u4e0d\u5b58\u5728\u80fd\u91cf\u9a7c\u5cf0\u7684\u60c5\u5f62\uff0c\u5219\u8fd4\u56deNone\uff0c\u5426\u5219\u8fd4\u56de\u6700\u540e\u4e00\u4e2a\u9a7c\u5cf0\u79bb\u73b0\u5728\u7684\u8ddd\u79bb\u53ca\u533a\u95f4\u957f\u5ea6\u3002 \"\"\" vol = bars [ \"volume\" ] std = np . std ( vol [ 1 :] / vol [: - 1 ]) pvs = peak_valley_pivots ( vol , std , 0 ) frames = bars [ \"frame\" ] pvs [ 0 ] = 0 pvs [ - 1 ] = - 1 peaks = np . argwhere ( pvs == 1 ) mn = np . mean ( vol [ peaks ]) # \u9876\u70b9\u4e0d\u80fd\u7f29\u91cf\u5230\u5c16\u5cf0\u5747\u503c\u4ee5\u4e0b real_peaks = np . intersect1d ( np . argwhere ( vol > mn ), peaks ) if len ( real_peaks ) < 2 : return None logger . debug ( \"found %s peaks at %s \" , len ( real_peaks ), frames [ real_peaks ]) lp = real_peaks [ - 1 ] ma = moving_average ( vol , 20 )[ lp ] if vol [ lp ] < ma * thresh : logger . debug ( \"vol of last peak[ %s ] is less than mean_vol(20) * thresh[ %s ]\" , vol [ lp ], ma * thresh , ) return None return len ( bars ) - real_peaks [ - 1 ], real_peaks [ - 1 ] - real_peaks [ 0 ] inverse_vcross ( f , g ) \u00b6 \u5224\u65ad\u5e8f\u5217f\u662f\u5426\u4e0e\u5e8f\u5217g\u5b58\u5728^\u578b\u76f8\u4ea4\u3002\u5373\u5b58\u5728\u4e24\u4e2a\u4ea4\u70b9\uff0c\u7b2c\u4e00\u4e2a\u4ea4\u70b9\u4e3a\u5411\u4e0a\u76f8\u4ea4\uff0c\u7b2c\u4e8c\u4e2a\u4ea4\u70b9\u4e3a\u5411\u4e0b \u76f8\u4ea4\u3002\u53ef\u7528\u4e8e\u5224\u65ad\u89c1\u9876\u7279\u5f81\u7b49\u573a\u5408\u3002 Parameters: Name Type Description Default f np.array [description] required g np.array [description] required Returns: Type Description Tuple [description] Source code in omicron/talib/morph.py def inverse_vcross ( f : np . array , g : np . 
array ) -> Tuple : \"\"\"\u5224\u65ad\u5e8f\u5217f\u662f\u5426\u4e0e\u5e8f\u5217g\u5b58\u5728^\u578b\u76f8\u4ea4\u3002\u5373\u5b58\u5728\u4e24\u4e2a\u4ea4\u70b9\uff0c\u7b2c\u4e00\u4e2a\u4ea4\u70b9\u4e3a\u5411\u4e0a\u76f8\u4ea4\uff0c\u7b2c\u4e8c\u4e2a\u4ea4\u70b9\u4e3a\u5411\u4e0b \u76f8\u4ea4\u3002\u53ef\u7528\u4e8e\u5224\u65ad\u89c1\u9876\u7279\u5f81\u7b49\u573a\u5408\u3002 Args: f (np.array): [description] g (np.array): [description] Returns: Tuple: [description] \"\"\" indices = np . argwhere ( np . diff ( np . sign ( f - g ))) . flatten () if len ( indices ) == 2 : idx0 , idx1 = indices if f [ idx0 ] < g [ idx0 ] and f [ idx1 ] > g [ idx1 ]: return True , ( idx0 , idx1 ) return False , ( None , None ) peaks_and_valleys ( ts , up_thresh = None , down_thresh = None ) \u00b6 \u5bfb\u627ets\u4e2d\u7684\u6ce2\u5cf0\u548c\u6ce2\u8c37\uff0c\u8fd4\u56de\u6570\u7ec4\u6307\u793a\u5728\u8be5\u4f4d\u7f6e\u4e0a\u662f\u5426\u4e3a\u6ce2\u5cf0\u6216\u6ce2\u8c37\u3002\u5982\u679c\u4e3a1\uff0c\u5219\u4e3a\u6ce2\u5cf0\uff1b\u5982\u679c\u4e3a-1\uff0c\u5219\u4e3a\u6ce2\u8c37\u3002 \u672c\u51fd\u6570\u76f4\u63a5\u4f7f\u7528\u4e86zigzag\u4e2d\u7684peak_valley_pivots. \u6709\u5f88\u591a\u65b9\u6cd5\u53ef\u4ee5\u5b9e\u73b0\u672c\u529f\u80fd\uff0c\u6bd4\u5982scipy.signals.find_peaks_cwt, peak_valley_pivots\u7b49\u3002\u672c\u51fd\u6570\u66f4\u9002\u5408\u91d1\u878d\u65f6\u95f4\u5e8f\u5217\uff0c\u5e76\u4e14\u4f7f\u7528\u4e86cython\u52a0\u901f\u3002 Parameters: Name Type Description Default ts np.ndarray \u65f6\u95f4\u5e8f\u5217 required up_thresh float \u6ce2\u5cf0\u7684\u9608\u503c\uff0c\u5982\u679c\u4e3aNone,\u5219\u4f7f\u7528ts\u53d8\u5316\u7387\u7684\u4e8c\u500d\u6807\u51c6\u5dee None down_thresh float \u6ce2\u8c37\u7684\u9608\u503c\uff0c\u5982\u679c\u4e3aNone,\u5219\u4f7f\u7528ts\u53d8\u5316\u7387\u7684\u4e8c\u500d\u6807\u51c6\u5dee\u4e58\u4ee5-1 None Returns: Type Description np.ndarray \u8fd4\u56de\u6570\u7ec4\u6307\u793a\u5728\u8be5\u4f4d\u7f6e\u4e0a\u662f\u5426\u4e3a\u6ce2\u5cf0\u6216\u6ce2\u8c37\u3002 Source code in omicron/talib/morph.py def peaks_and_valleys ( ts : np . ndarray , up_thresh : Optional [ float ] = None , down_thresh : Optional [ float ] = None , ) -> np . ndarray : \"\"\"\u5bfb\u627ets\u4e2d\u7684\u6ce2\u5cf0\u548c\u6ce2\u8c37\uff0c\u8fd4\u56de\u6570\u7ec4\u6307\u793a\u5728\u8be5\u4f4d\u7f6e\u4e0a\u662f\u5426\u4e3a\u6ce2\u5cf0\u6216\u6ce2\u8c37\u3002\u5982\u679c\u4e3a1\uff0c\u5219\u4e3a\u6ce2\u5cf0\uff1b\u5982\u679c\u4e3a-1\uff0c\u5219\u4e3a\u6ce2\u8c37\u3002 \u672c\u51fd\u6570\u76f4\u63a5\u4f7f\u7528\u4e86zigzag\u4e2d\u7684peak_valley_pivots. \u6709\u5f88\u591a\u65b9\u6cd5\u53ef\u4ee5\u5b9e\u73b0\u672c\u529f\u80fd\uff0c\u6bd4\u5982scipy.signals.find_peaks_cwt, peak_valley_pivots\u7b49\u3002\u672c\u51fd\u6570\u66f4\u9002\u5408\u91d1\u878d\u65f6\u95f4\u5e8f\u5217\uff0c\u5e76\u4e14\u4f7f\u7528\u4e86cython\u52a0\u901f\u3002 Args: ts (np.ndarray): \u65f6\u95f4\u5e8f\u5217 up_thresh (float): \u6ce2\u5cf0\u7684\u9608\u503c\uff0c\u5982\u679c\u4e3aNone,\u5219\u4f7f\u7528ts\u53d8\u5316\u7387\u7684\u4e8c\u500d\u6807\u51c6\u5dee down_thresh (float): \u6ce2\u8c37\u7684\u9608\u503c\uff0c\u5982\u679c\u4e3aNone,\u5219\u4f7f\u7528ts\u53d8\u5316\u7387\u7684\u4e8c\u500d\u6807\u51c6\u5dee\u4e58\u4ee5-1 Returns: np.ndarray: \u8fd4\u56de\u6570\u7ec4\u6307\u793a\u5728\u8be5\u4f4d\u7f6e\u4e0a\u662f\u5426\u4e3a\u6ce2\u5cf0\u6216\u6ce2\u8c37\u3002 \"\"\" if ts . dtype != np . float64 : ts = ts . astype ( np . 
float64 ) if any ([ up_thresh is None , down_thresh is None ]): change_rate = ts [ 1 :] / ts [: - 1 ] - 1 std = np . std ( change_rate ) up_thresh = up_thresh or 2 * std down_thresh = down_thresh or - 2 * std return peak_valley_pivots ( ts , up_thresh , down_thresh ) plateaus ( numbers , min_size , fall_in_range_ratio = 0.97 ) \u00b6 \u7edf\u8ba1\u6570\u7ec4 numbers \u4e2d\u7684\u53ef\u80fd\u5b58\u5728\u7684\u5e73\u53f0\u6574\u7406\u3002 \u5982\u679c\u4e00\u4e2a\u6570\u7ec4\u4e2d\u5b58\u5728\u7740\u5b50\u6570\u7ec4\uff0c\u4f7f\u5f97\u5176\u5143\u7d20\u4e0e\u5747\u503c\u7684\u8ddd\u79bb\u843d\u5728\u4e09\u4e2a\u6807\u51c6\u5dee\u4ee5\u5185\u7684\u6bd4\u4f8b\u8d85\u8fc7 fall_in_range_ratio \u7684\uff0c\u5219\u8ba4\u4e3a\u8be5\u5b50\u6570\u7ec4\u6ee1\u8db3\u5e73\u53f0\u6574\u7406\u3002 Parameters: Name Type Description Default numbers ndarray \u8f93\u5165\u6570\u7ec4 required min_size int \u5e73\u53f0\u7684\u6700\u5c0f\u957f\u5ea6 required fall_in_range_ratio float \u8d85\u8fc7 fall_in_range_ratio \u6bd4\u4f8b\u7684\u5143\u7d20\u843d\u5728\u5747\u503c\u7684\u4e09\u4e2a\u6807\u51c6\u5dee\u4ee5\u5185\uff0c\u5c31\u8ba4\u4e3a\u8be5\u5b50\u6570\u7ec4\u6784\u6210\u4e00\u4e2a\u5e73\u53f0 0.97 Returns: Type Description List[Tuple] \u5e73\u53f0\u7684\u8d77\u59cb\u4f4d\u7f6e\u548c\u957f\u5ea6\u7684\u6570\u7ec4 Source code in omicron/talib/morph.py def plateaus ( numbers : np . ndarray , min_size : int , fall_in_range_ratio : float = 0.97 ) -> List [ Tuple ]: \"\"\"\u7edf\u8ba1\u6570\u7ec4`numbers`\u4e2d\u7684\u53ef\u80fd\u5b58\u5728\u7684\u5e73\u53f0\u6574\u7406\u3002 \u5982\u679c\u4e00\u4e2a\u6570\u7ec4\u4e2d\u5b58\u5728\u7740\u5b50\u6570\u7ec4\uff0c\u4f7f\u5f97\u5176\u5143\u7d20\u4e0e\u5747\u503c\u7684\u8ddd\u79bb\u843d\u5728\u4e09\u4e2a\u6807\u51c6\u5dee\u4ee5\u5185\u7684\u6bd4\u4f8b\u8d85\u8fc7`fall_in_range_ratio`\u7684\uff0c\u5219\u8ba4\u4e3a\u8be5\u5b50\u6570\u7ec4\u6ee1\u8db3\u5e73\u53f0\u6574\u7406\u3002 Args: numbers: \u8f93\u5165\u6570\u7ec4 min_size: \u5e73\u53f0\u7684\u6700\u5c0f\u957f\u5ea6 fall_in_range_ratio: \u8d85\u8fc7`fall_in_range_ratio`\u6bd4\u4f8b\u7684\u5143\u7d20\u843d\u5728\u5747\u503c\u7684\u4e09\u4e2a\u6807\u51c6\u5dee\u4ee5\u5185\uff0c\u5c31\u8ba4\u4e3a\u8be5\u5b50\u6570\u7ec4\u6784\u6210\u4e00\u4e2a\u5e73\u53f0 Returns: \u5e73\u53f0\u7684\u8d77\u59cb\u4f4d\u7f6e\u548c\u957f\u5ea6\u7684\u6570\u7ec4 \"\"\" if numbers . size <= min_size : n = 1 else : n = numbers . size // min_size clusters = clustering ( numbers , n ) plats = [] for ( start , length ) in clusters : if length < min_size : continue y = numbers [ start : start + length ] mean = np . mean ( y ) std = np . std ( y ) inrange = len ( y [ np . abs ( y - mean ) < 3 * std ]) ratio = inrange / length if ratio >= fall_in_range_ratio : plats . 
append (( start , length )) return plats rsi_bottom_distance ( close , thresh = None ) \u00b6 \u6839\u636e\u7ed9\u5b9a\u7684\u6536\u76d8\u4ef7\uff0c\u8ba1\u7b97\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5230\u4e0a\u4e00\u4e2a\u53d1\u51farsi\u4f4e\u6c34\u5e73\u7684\u8ddd\u79bb\uff0c \u5982\u679c\u4ece\u4e0a\u4e00\u4e2a\u6700\u4f4e\u70b9rsi\u5230\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5e76\u672a\u53d1\u51fa\u4f4e\u6c34\u5e73\u4fe1\u53f7\uff0c \u8fd4\u56de\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5230\u4e0a\u4e00\u4e2a\u53d1\u51fa\u6700\u4f4e\u70b9rsi\u7684\u8ddd\u79bb\u3002 \u5176\u4e2dclose\u7684\u957f\u5ea6\u4e00\u822c\u4e0d\u5c0f\u4e8e60\u3002 \u8fd4\u56de\u503c\u4e3a\u8ddd\u79bb\u6574\u6570\uff0c\u4e0d\u6ee1\u8db3\u6761\u4ef6\u5219\u8fd4\u56deNone\u3002 Parameters: Name Type Description Default close np.array \u5177\u6709\u65f6\u95f4\u5e8f\u5217\u7684\u6536\u76d8\u4ef7 required thresh Tuple[float, float]) None\u9002\u7528\u6240\u6709\u80a1\u7968\uff0c\u4e0d\u5fc5\u66f4\u6539\uff0c\u4e5f\u53ef\u81ea\u884c\u8bbe\u7f6e\u3002 None Returns: Type Description int \u8fd4\u56de\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5230\u4e0a\u4e00\u4e2a\u53d1\u51farsi\u4f4e\u6c34\u5e73\u7684\u8ddd\u79bb\u3002 \u5982\u679c\u4ece\u4e0a\u4e00\u4e2a\u6700\u4f4e\u70b9rsi\u5230\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5e76\u672a\u53d1\u51fa\u4f4e\u6c34\u5e73\u4fe1\u53f7\uff0c \u8fd4\u56de\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5230\u4e0a\u4e00\u4e2a\u53d1\u51fa\u6700\u4f4e\u70b9rsi\u7684\u8ddd\u79bb\u3002 \u9664\u6b64\u4e4b\u5916\uff0c\u8fd4\u56deNone\u3002 Source code in omicron/talib/morph.py def rsi_bottom_distance ( close : np . array , thresh : Tuple [ float , float ] = None ) -> int : \"\"\"\u6839\u636e\u7ed9\u5b9a\u7684\u6536\u76d8\u4ef7\uff0c\u8ba1\u7b97\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5230\u4e0a\u4e00\u4e2a\u53d1\u51farsi\u4f4e\u6c34\u5e73\u7684\u8ddd\u79bb\uff0c \u5982\u679c\u4ece\u4e0a\u4e00\u4e2a\u6700\u4f4e\u70b9rsi\u5230\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5e76\u672a\u53d1\u51fa\u4f4e\u6c34\u5e73\u4fe1\u53f7\uff0c \u8fd4\u56de\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5230\u4e0a\u4e00\u4e2a\u53d1\u51fa\u6700\u4f4e\u70b9rsi\u7684\u8ddd\u79bb\u3002 \u5176\u4e2dclose\u7684\u957f\u5ea6\u4e00\u822c\u4e0d\u5c0f\u4e8e60\u3002 \u8fd4\u56de\u503c\u4e3a\u8ddd\u79bb\u6574\u6570\uff0c\u4e0d\u6ee1\u8db3\u6761\u4ef6\u5219\u8fd4\u56deNone\u3002 Args: close (np.array): \u5177\u6709\u65f6\u95f4\u5e8f\u5217\u7684\u6536\u76d8\u4ef7 thresh (Tuple[float, float]) : None\u9002\u7528\u6240\u6709\u80a1\u7968\uff0c\u4e0d\u5fc5\u66f4\u6539\uff0c\u4e5f\u53ef\u81ea\u884c\u8bbe\u7f6e\u3002 Returns: \u8fd4\u56de\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5230\u4e0a\u4e00\u4e2a\u53d1\u51farsi\u4f4e\u6c34\u5e73\u7684\u8ddd\u79bb\u3002 \u5982\u679c\u4ece\u4e0a\u4e00\u4e2a\u6700\u4f4e\u70b9rsi\u5230\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5e76\u672a\u53d1\u51fa\u4f4e\u6c34\u5e73\u4fe1\u53f7\uff0c \u8fd4\u56de\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5230\u4e0a\u4e00\u4e2a\u53d1\u51fa\u6700\u4f4e\u70b9rsi\u7684\u8ddd\u79bb\u3002 \u9664\u6b64\u4e4b\u5916\uff0c\u8fd4\u56deNone\u3002\"\"\" assert len ( close ) >= 60 , \"must provide an array with at least 60 length!\" if close . dtype != np . float64 : close = close . astype ( np . float64 ) if thresh is None : std = np . std ( close [ - 59 :] / close [ - 60 : - 1 ] - 1 ) thresh = ( 2 * std , - 2 * std ) rsi = ta . 
RSI ( close , 6 ) watermarks = rsi_watermarks ( close , thresh ) if watermarks is not None : low_watermark , _ , _ = watermarks pivots = peak_valley_pivots ( close , thresh [ 0 ], thresh [ 1 ]) pivots [ 0 ], pivots [ - 1 ] = 0 , 0 # \u8c37\u503cRSI<30 valley_rsi_index = np . where (( rsi < 30 ) & ( pivots == - 1 ))[ 0 ] # RSI\u4f4e\u6c34\u5e73\u7684\u6700\u5927\u503c\uff1a\u4f4e\u6c34\u5e73*1.01 low_rsi_index = np . where ( rsi <= low_watermark * 1.01 )[ 0 ] if len ( valley_rsi_index ) > 0 : distance = len ( rsi ) - 1 - valley_rsi_index [ - 1 ] if len ( low_rsi_index ) > 0 : if low_rsi_index [ - 1 ] >= valley_rsi_index [ - 1 ]: distance = len ( rsi ) - 1 - low_rsi_index [ - 1 ] return distance rsi_bottom_divergent ( close , thresh = None , rsi_limit = 30 ) \u00b6 \u5bfb\u627e\u6700\u8fd1\u6ee1\u8db3\u6761\u4ef6\u7684rsi\u5e95\u80cc\u79bb\u3002 \u8fd4\u56de\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5230\u6700\u8fd1\u5e95\u80cc\u79bb\u53d1\u751f\u70b9\u7684\u8ddd\u79bb\uff1b\u6ca1\u6709\u6ee1\u8db3\u6761\u4ef6\u7684\u5e95\u80cc\u79bb\uff0c\u8fd4\u56deNone\u3002 Parameters: Name Type Description Default close np.array \u65f6\u95f4\u5e8f\u5217\u6536\u76d8\u4ef7 required thresh Tuple[float, float] \u8bf7\u53c2\u8003 peaks_and_valleys None rsi_limit float RSI\u53d1\u751f\u5e95\u80cc\u79bb\u65f6\u7684\u9608\u503c, \u9ed8\u8ba4\u503c30\uff0820\u6548\u679c\u66f4\u4f73\uff0c\u4f46\u662f\u68c0\u6d4b\u51fa\u6765\u6570\u91cf\u592a\u5c11\uff09\uff0c\u5373\u53ea\u8fc7\u6ee4RSI6<30\u7684\u5c40\u90e8\u6700\u4f4e\u6536\u76d8\u4ef7\u3002 30 Returns: Type Description int \u8fd4\u56deint\u7c7b\u578b\u7684\u6574\u6570\uff0c\u8868\u793a\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5230\u6700\u8fd1\u5e95\u80cc\u79bb\u53d1\u751f\u70b9\u7684\u8ddd\u79bb\uff1b\u6ca1\u6709\u6ee1\u8db3\u6761\u4ef6\u7684\u5e95\u80cc\u79bb\uff0c\u8fd4\u56deNone\u3002 Source code in omicron/talib/morph.py def rsi_bottom_divergent ( close : np . array , thresh : Tuple [ float , float ] = None , rsi_limit : float = 30 ) -> int : \"\"\"\u5bfb\u627e\u6700\u8fd1\u6ee1\u8db3\u6761\u4ef6\u7684rsi\u5e95\u80cc\u79bb\u3002 \u8fd4\u56de\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5230\u6700\u8fd1\u5e95\u80cc\u79bb\u53d1\u751f\u70b9\u7684\u8ddd\u79bb\uff1b\u6ca1\u6709\u6ee1\u8db3\u6761\u4ef6\u7684\u5e95\u80cc\u79bb\uff0c\u8fd4\u56deNone\u3002 Args: close (np.array): \u65f6\u95f4\u5e8f\u5217\u6536\u76d8\u4ef7 thresh (Tuple[float, float]): \u8bf7\u53c2\u8003[peaks_and_valleys][omicron.talib.morph.peaks_and_valleys] rsi_limit (float, optional): RSI\u53d1\u751f\u5e95\u80cc\u79bb\u65f6\u7684\u9608\u503c, \u9ed8\u8ba4\u503c30\uff0820\u6548\u679c\u66f4\u4f73\uff0c\u4f46\u662f\u68c0\u6d4b\u51fa\u6765\u6570\u91cf\u592a\u5c11\uff09\uff0c\u5373\u53ea\u8fc7\u6ee4RSI6<30\u7684\u5c40\u90e8\u6700\u4f4e\u6536\u76d8\u4ef7\u3002 Returns: \u8fd4\u56deint\u7c7b\u578b\u7684\u6574\u6570\uff0c\u8868\u793a\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5230\u6700\u8fd1\u5e95\u80cc\u79bb\u53d1\u751f\u70b9\u7684\u8ddd\u79bb\uff1b\u6ca1\u6709\u6ee1\u8db3\u6761\u4ef6\u7684\u5e95\u80cc\u79bb\uff0c\u8fd4\u56deNone\u3002 \"\"\" assert len ( close ) >= 60 , \"must provide an array with at least 60 length!\" if close . dtype != np . float64 : close = close . astype ( np . float64 ) rsi = ta . RSI ( close , 6 ) if thresh is None : std = np . std ( close [ - 59 :] / close [ - 60 : - 1 ] - 1 ) thresh = ( 2 * std , - 2 * std ) pivots = peak_valley_pivots ( close , thresh [ 0 ], thresh [ 1 ]) pivots [ 0 ], pivots [ - 1 ] = 0 , 0 length = len ( close ) valley_index = np . 
where (( pivots == - 1 ) & ( rsi <= rsi_limit ))[ 0 ] if len ( valley_index ) >= 2 : if ( close [ valley_index [ - 1 ]] < close [ valley_index [ - 2 ]]) and ( rsi [ valley_index [ - 1 ]] > rsi [ valley_index [ - 2 ]] ): bottom_dev_distance = length - 1 - valley_index [ - 1 ] return bottom_dev_distance rsi_predict_price ( close , thresh = None ) \u00b6 \u7ed9\u5b9a\u4e00\u6bb5\u884c\u60c5\uff0c\u6839\u636e\u6700\u8fd1\u7684\u4e24\u4e2aRSI\u7684\u6781\u5c0f\u503c\u548c\u6781\u5927\u503c\u9884\u6d4b\u4e0b\u4e00\u4e2a\u5468\u671f\u53ef\u80fd\u8fbe\u5230\u7684\u6700\u4f4e\u4ef7\u683c\u548c\u6700\u9ad8\u4ef7\u683c\u3002 \u5176\u539f\u7406\u662f\uff0c\u4ee5\u9884\u6d4b\u6700\u8fd1\u7684\u4e24\u4e2a\u6700\u9ad8\u4ef7\u548c\u6700\u4f4e\u4ef7\uff0c\u6c42\u51fa\u5176\u76f8\u5bf9\u5e94\u7684RSI\u503c\uff0c\u6c42\u51fa\u6700\u9ad8\u4ef7\u548c\u6700\u4f4e\u4ef7RSI\u7684\u5747\u503c\uff0c \u82e5\u53ea\u6709\u4e00\u4e2a\u5219\u53d6\u6700\u8fd1\u7684\u4e00\u4e2a\u3002\u518d\u7531RSI\u516c\u5f0f\uff0c\u53cd\u63a8\u4ef7\u683c\u3002\u6b64\u65f6\u8fd4\u56de\u503c\u4e3a(None, float)\uff0c\u5373\u53ea\u6709\u6700\u9ad8\u4ef7\uff0c\u6ca1\u6709\u6700\u4f4e\u4ef7\u3002\u53cd\u4e4b\u4ea6\u7136\u3002 Parameters: Name Type Description Default close np.ndarray \u5177\u6709\u65f6\u95f4\u5e8f\u5217\u7684\u6536\u76d8\u4ef7 required thresh Tuple[float, float]) \u8bf7\u53c2\u8003 peaks_and_valleys None Returns: Type Description Tuple[float, float] \u8fd4\u56de\u6570\u7ec4[predicted_low_price, predicted_high_price], \u6570\u7ec4\u7b2c\u4e00\u4e2a\u503c\u4e3a\u5229\u7528\u8fbe\u5230\u4e4b\u524d\u6700\u4f4e\u6536\u76d8\u4ef7\u7684RSI\u9884\u6d4b\u7684\u6700\u4f4e\u4ef7\u3002 \u7b2c\u4e8c\u4e2a\u503c\u4e3a\u5229\u7528\u8fbe\u5230\u4e4b\u524d\u6700\u9ad8\u6536\u76d8\u4ef7\u7684RSI\u9884\u6d4b\u7684\u6700\u9ad8\u4ef7\u3002 Source code in omicron/talib/morph.py def rsi_predict_price ( close : np . ndarray , thresh : Tuple [ float , float ] = None ) -> Tuple [ float , float ]: \"\"\"\u7ed9\u5b9a\u4e00\u6bb5\u884c\u60c5\uff0c\u6839\u636e\u6700\u8fd1\u7684\u4e24\u4e2aRSI\u7684\u6781\u5c0f\u503c\u548c\u6781\u5927\u503c\u9884\u6d4b\u4e0b\u4e00\u4e2a\u5468\u671f\u53ef\u80fd\u8fbe\u5230\u7684\u6700\u4f4e\u4ef7\u683c\u548c\u6700\u9ad8\u4ef7\u683c\u3002 \u5176\u539f\u7406\u662f\uff0c\u4ee5\u9884\u6d4b\u6700\u8fd1\u7684\u4e24\u4e2a\u6700\u9ad8\u4ef7\u548c\u6700\u4f4e\u4ef7\uff0c\u6c42\u51fa\u5176\u76f8\u5bf9\u5e94\u7684RSI\u503c\uff0c\u6c42\u51fa\u6700\u9ad8\u4ef7\u548c\u6700\u4f4e\u4ef7RSI\u7684\u5747\u503c\uff0c \u82e5\u53ea\u6709\u4e00\u4e2a\u5219\u53d6\u6700\u8fd1\u7684\u4e00\u4e2a\u3002\u518d\u7531RSI\u516c\u5f0f\uff0c\u53cd\u63a8\u4ef7\u683c\u3002\u6b64\u65f6\u8fd4\u56de\u503c\u4e3a(None, float)\uff0c\u5373\u53ea\u6709\u6700\u9ad8\u4ef7\uff0c\u6ca1\u6709\u6700\u4f4e\u4ef7\u3002\u53cd\u4e4b\u4ea6\u7136\u3002 Args: close (np.ndarray): \u5177\u6709\u65f6\u95f4\u5e8f\u5217\u7684\u6536\u76d8\u4ef7 thresh (Tuple[float, float]) : \u8bf7\u53c2\u8003[peaks_and_valleys][omicron.talib.morph.peaks_and_valleys] Returns: \u8fd4\u56de\u6570\u7ec4[predicted_low_price, predicted_high_price], \u6570\u7ec4\u7b2c\u4e00\u4e2a\u503c\u4e3a\u5229\u7528\u8fbe\u5230\u4e4b\u524d\u6700\u4f4e\u6536\u76d8\u4ef7\u7684RSI\u9884\u6d4b\u7684\u6700\u4f4e\u4ef7\u3002 \u7b2c\u4e8c\u4e2a\u503c\u4e3a\u5229\u7528\u8fbe\u5230\u4e4b\u524d\u6700\u9ad8\u6536\u76d8\u4ef7\u7684RSI\u9884\u6d4b\u7684\u6700\u9ad8\u4ef7\u3002 \"\"\" assert len ( close ) >= 60 , \"must provide an array with at least 60 length!\" if thresh is None : std = np . 
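The `rsi_bottom_divergent` helper documented above returns a bar distance rather than a boolean, so callers typically compare it against a freshness window. A minimal usage sketch, assuming the function is importable from `omicron.talib.morph` as the source path above indicates and that its dependencies (TA-Lib's `RSI`, zigzag's `peak_valley_pivots`) are installed; the synthetic `close` series is illustrative only:

```python
import numpy as np

# import path taken from the "Source code in omicron/talib/morph.py" note above
from omicron.talib.morph import rsi_bottom_divergent

# synthetic close series, at least 60 bars as the assertion in the source requires
np.random.seed(7)
close = np.cumprod(1 + np.random.randn(80) * 0.01) * 10.0

# thresh=None lets the function derive +/- 2*std of bar returns, as documented
distance = rsi_bottom_divergent(close, thresh=None, rsi_limit=30)

if distance is not None and distance <= 5:
    print(f"bottom divergence fired {distance} bars ago")  # fresh signal
else:
    print("no recent bottom divergence")
```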
std ( close [ - 59 :] / close [ - 60 : - 1 ] - 1 ) thresh = ( 2 * std , - 2 * std ) if close . dtype != np . float64 : close = close . astype ( np . float64 ) valley_rsi , peak_rsi , _ = rsi_watermarks ( close , thresh = thresh ) pivot = peak_valley_pivots ( close , thresh [ 0 ], thresh [ 1 ]) pivot [ 0 ], pivot [ - 1 ] = 0 , 0 # \u6390\u5934\u53bb\u5c3e price_change = pd . Series ( close ) . diff ( 1 ) . values ave_price_change = ( abs ( price_change )[ - 6 :] . mean ()) * 5 ave_price_raise = ( np . maximum ( price_change , 0 )[ - 6 :] . mean ()) * 5 if valley_rsi is not None : predicted_low_change = ( ave_price_change ) - ave_price_raise / ( 0.01 * valley_rsi ) if predicted_low_change > 0 : predicted_low_change = 0 predicted_low_price = close [ - 1 ] + predicted_low_change else : predicted_low_price = None if peak_rsi is not None : predicted_high_change = ( ave_price_raise - ave_price_change ) / ( 0.01 * peak_rsi - 1 ) - ave_price_change if predicted_high_change < 0 : predicted_high_change = 0 predicted_high_price = close [ - 1 ] + predicted_high_change else : predicted_high_price = None return predicted_low_price , predicted_high_price rsi_top_distance ( close , thresh = None ) \u00b6 \u6839\u636e\u7ed9\u5b9a\u7684\u6536\u76d8\u4ef7\uff0c\u8ba1\u7b97\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5230\u4e0a\u4e00\u4e2a\u53d1\u51farsi\u9ad8\u6c34\u5e73\u7684\u8ddd\u79bb\uff0c \u5982\u679c\u4ece\u4e0a\u4e00\u4e2a\u6700\u9ad8\u70b9rsi\u5230\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5e76\u672a\u53d1\u51fa\u9ad8\u6c34\u5e73\u4fe1\u53f7\uff0c \u8fd4\u56de\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5230\u4e0a\u4e00\u4e2a\u53d1\u51fa\u6700\u9ad8\u70b9rsi\u7684\u8ddd\u79bb\u3002 \u5176\u4e2dclose\u7684\u957f\u5ea6\u4e00\u822c\u4e0d\u5c0f\u4e8e60\u3002 \u8fd4\u56de\u503c\u4e3a\u8ddd\u79bb\u6574\u6570\uff0c\u4e0d\u6ee1\u8db3\u6761\u4ef6\u5219\u8fd4\u56deNone\u3002 Parameters: Name Type Description Default close np.array \u5177\u6709\u65f6\u95f4\u5e8f\u5217\u7684\u6536\u76d8\u4ef7 required thresh Tuple[float, float]) None\u9002\u7528\u6240\u6709\u80a1\u7968\uff0c\u4e0d\u5fc5\u66f4\u6539\uff0c\u4e5f\u53ef\u81ea\u884c\u8bbe\u7f6e\u3002 None Returns: Type Description int \u8fd4\u56de\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5230\u4e0a\u4e00\u4e2a\u53d1\u51farsi\u9ad8\u6c34\u5e73\u7684\u8ddd\u79bb\u3002 \u5982\u679c\u4ece\u4e0a\u4e00\u4e2a\u6700\u9ad8\u70b9rsi\u5230\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5e76\u672a\u53d1\u51fa\u9ad8\u6c34\u5e73\u4fe1\u53f7\uff0c \u8fd4\u56de\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5230\u4e0a\u4e00\u4e2a\u53d1\u51fa\u6700\u9ad8\u70b9rsi\u7684\u8ddd\u79bb\u3002 \u9664\u6b64\u4e4b\u5916\uff0c\u8fd4\u56deNone\u3002 Source code in omicron/talib/morph.py def rsi_top_distance ( close : np . 
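Since `rsi_predict_price` (documented just above) may return `None` in either slot, both values should be guarded before being used as price targets. A hedged sketch under the same `omicron.talib.morph` import-path assumption, with purely synthetic prices:

```python
import numpy as np
from omicron.talib.morph import rsi_predict_price  # path per the source listing above

np.random.seed(2023)
close = np.cumprod(1 + np.random.randn(90) * 0.015) * 25.0  # >= 60 bars

low_target, high_target = rsi_predict_price(close, thresh=None)

# either side may be None when no qualifying RSI extreme exists in the window
if low_target is not None:
    print(f"projected downside target near {low_target:.2f}")
if high_target is not None:
    print(f"projected upside target near {high_target:.2f}")
```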
array , thresh : Tuple [ float , float ] = None ) -> int : \"\"\"\u6839\u636e\u7ed9\u5b9a\u7684\u6536\u76d8\u4ef7\uff0c\u8ba1\u7b97\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5230\u4e0a\u4e00\u4e2a\u53d1\u51farsi\u9ad8\u6c34\u5e73\u7684\u8ddd\u79bb\uff0c \u5982\u679c\u4ece\u4e0a\u4e00\u4e2a\u6700\u9ad8\u70b9rsi\u5230\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5e76\u672a\u53d1\u51fa\u9ad8\u6c34\u5e73\u4fe1\u53f7\uff0c \u8fd4\u56de\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5230\u4e0a\u4e00\u4e2a\u53d1\u51fa\u6700\u9ad8\u70b9rsi\u7684\u8ddd\u79bb\u3002 \u5176\u4e2dclose\u7684\u957f\u5ea6\u4e00\u822c\u4e0d\u5c0f\u4e8e60\u3002 \u8fd4\u56de\u503c\u4e3a\u8ddd\u79bb\u6574\u6570\uff0c\u4e0d\u6ee1\u8db3\u6761\u4ef6\u5219\u8fd4\u56deNone\u3002 Args: close (np.array): \u5177\u6709\u65f6\u95f4\u5e8f\u5217\u7684\u6536\u76d8\u4ef7 thresh (Tuple[float, float]) : None\u9002\u7528\u6240\u6709\u80a1\u7968\uff0c\u4e0d\u5fc5\u66f4\u6539\uff0c\u4e5f\u53ef\u81ea\u884c\u8bbe\u7f6e\u3002 Returns: \u8fd4\u56de\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5230\u4e0a\u4e00\u4e2a\u53d1\u51farsi\u9ad8\u6c34\u5e73\u7684\u8ddd\u79bb\u3002 \u5982\u679c\u4ece\u4e0a\u4e00\u4e2a\u6700\u9ad8\u70b9rsi\u5230\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5e76\u672a\u53d1\u51fa\u9ad8\u6c34\u5e73\u4fe1\u53f7\uff0c \u8fd4\u56de\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5230\u4e0a\u4e00\u4e2a\u53d1\u51fa\u6700\u9ad8\u70b9rsi\u7684\u8ddd\u79bb\u3002 \u9664\u6b64\u4e4b\u5916\uff0c\u8fd4\u56deNone\u3002\"\"\" assert len ( close ) >= 60 , \"must provide an array with at least 60 length!\" if close . dtype != np . float64 : close = close . astype ( np . float64 ) if thresh is None : std = np . std ( close [ - 59 :] / close [ - 60 : - 1 ] - 1 ) thresh = ( 2 * std , - 2 * std ) rsi = ta . RSI ( close , 6 ) watermarks = rsi_watermarks ( close , thresh ) if watermarks is not None : _ , high_watermark , _ = watermarks pivots = peak_valley_pivots ( close , thresh [ 0 ], thresh [ 1 ]) pivots [ 0 ], pivots [ - 1 ] = 0 , 0 # \u5cf0\u503cRSI>70 peak_rsi_index = np . where (( rsi > 70 ) & ( pivots == 1 ))[ 0 ] # RSI\u9ad8\u6c34\u5e73\u7684\u6700\u5c0f\u503c\uff1a\u9ad8\u6c34\u5e73*0.99 high_rsi_index = np . 
where ( rsi >= high_watermark * 0.99 )[ 0 ] if len ( peak_rsi_index ) > 0 : distance = len ( rsi ) - 1 - peak_rsi_index [ - 1 ] if len ( high_rsi_index ) > 0 : if high_rsi_index [ - 1 ] >= peak_rsi_index [ - 1 ]: distance = len ( rsi ) - 1 - high_rsi_index [ - 1 ] return distance rsi_top_divergent ( close , thresh = None , rsi_limit = 70 ) \u00b6 \u5bfb\u627e\u6700\u8fd1\u6ee1\u8db3\u6761\u4ef6\u7684rsi\u9876\u80cc\u79bb\u3002 \u8fd4\u56de\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5230\u6700\u8fd1\u9876\u80cc\u79bb\u53d1\u751f\u70b9\u7684\u8ddd\u79bb\uff1b\u6ca1\u6709\u6ee1\u8db3\u6761\u4ef6\u7684\u9876\u80cc\u79bb\uff0c\u8fd4\u56deNone\u3002 Parameters: Name Type Description Default close np.array \u65f6\u95f4\u5e8f\u5217\u6536\u76d8\u4ef7 required thresh Tuple[float, float] \u8bf7\u53c2\u8003 peaks_and_valleys None rsi_limit float RSI\u53d1\u751f\u9876\u80cc\u79bb\u65f6\u7684\u9608\u503c, \u9ed8\u8ba4\u503c70\uff0880\u6548\u679c\u66f4\u4f73\uff0c\u4f46\u662f\u68c0\u6d4b\u51fa\u6765\u6570\u91cf\u592a\u5c11\uff09\uff0c\u5373\u53ea\u8fc7\u6ee4RSI6>70\u7684\u5c40\u90e8\u6700\u9ad8\u6536\u76d8\u4ef7\u3002 70 Returns: Type Description Tuple[int, int] \u8fd4\u56deint\u7c7b\u578b\u7684\u6574\u6570\uff0c\u8868\u793a\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5230\u6700\u8fd1\u9876\u80cc\u79bb\u53d1\u751f\u70b9\u7684\u8ddd\u79bb\uff1b\u6ca1\u6709\u6ee1\u8db3\u6761\u4ef6\u7684\u9876\u80cc\u79bb\uff0c\u8fd4\u56deNone\u3002 Source code in omicron/talib/morph.py def rsi_top_divergent ( close : np . array , thresh : Tuple [ float , float ] = None , rsi_limit : float = 70 ) -> Tuple [ int , int ]: \"\"\"\u5bfb\u627e\u6700\u8fd1\u6ee1\u8db3\u6761\u4ef6\u7684rsi\u9876\u80cc\u79bb\u3002 \u8fd4\u56de\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5230\u6700\u8fd1\u9876\u80cc\u79bb\u53d1\u751f\u70b9\u7684\u8ddd\u79bb\uff1b\u6ca1\u6709\u6ee1\u8db3\u6761\u4ef6\u7684\u9876\u80cc\u79bb\uff0c\u8fd4\u56deNone\u3002 Args: close (np.array): \u65f6\u95f4\u5e8f\u5217\u6536\u76d8\u4ef7 thresh (Tuple[float, float]): \u8bf7\u53c2\u8003[peaks_and_valleys][omicron.talib.morph.peaks_and_valleys] rsi_limit (float, optional): RSI\u53d1\u751f\u9876\u80cc\u79bb\u65f6\u7684\u9608\u503c, \u9ed8\u8ba4\u503c70\uff0880\u6548\u679c\u66f4\u4f73\uff0c\u4f46\u662f\u68c0\u6d4b\u51fa\u6765\u6570\u91cf\u592a\u5c11\uff09\uff0c\u5373\u53ea\u8fc7\u6ee4RSI6>70\u7684\u5c40\u90e8\u6700\u9ad8\u6536\u76d8\u4ef7\u3002 Returns: \u8fd4\u56deint\u7c7b\u578b\u7684\u6574\u6570\uff0c\u8868\u793a\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5230\u6700\u8fd1\u9876\u80cc\u79bb\u53d1\u751f\u70b9\u7684\u8ddd\u79bb\uff1b\u6ca1\u6709\u6ee1\u8db3\u6761\u4ef6\u7684\u9876\u80cc\u79bb\uff0c\u8fd4\u56deNone\u3002 \"\"\" assert len ( close ) >= 60 , \"must provide an array with at least 60 length!\" if close . dtype != np . float64 : close = close . astype ( np . float64 ) rsi = ta . RSI ( close , 6 ) if thresh is None : std = np . std ( close [ - 59 :] / close [ - 60 : - 1 ] - 1 ) thresh = ( 2 * std , - 2 * std ) pivots = peak_valley_pivots ( close , thresh [ 0 ], thresh [ 1 ]) pivots [ 0 ], pivots [ - 1 ] = 0 , 0 length = len ( close ) peak_index = np . 
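Because `rsi_top_divergent` and the earlier `rsi_bottom_divergent` share one calling convention, they combine naturally into a simple screen. A sketch assuming the same `omicron.talib.morph` imports; the two synthetic series standing in for a watchlist are illustrative only:

```python
import numpy as np
from omicron.talib.morph import rsi_bottom_divergent, rsi_top_divergent

np.random.seed(11)
# hypothetical watchlist: code -> close prices (>= 60 bars each)
watchlist = {
    "000001.XSHE": np.cumprod(1 + np.random.randn(70) * 0.02) * 12.0,
    "600000.XSHG": np.cumprod(1 + np.random.randn(70) * 0.02) * 8.0,
}

for code, close in watchlist.items():
    top = rsi_top_divergent(close, rsi_limit=70)        # bars since last top divergence, or None
    bottom = rsi_bottom_divergent(close, rsi_limit=30)  # bars since last bottom divergence, or None
    if top is not None and top <= 3:
        print(f"{code}: possible exhaustion top {top} bars ago")
    if bottom is not None and bottom <= 3:
        print(f"{code}: possible bottom reversal {bottom} bars ago")
```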
where (( pivots == 1 ) & ( rsi >= rsi_limit ))[ 0 ] if len ( peak_index ) >= 2 : if ( close [ peak_index [ - 1 ]] > close [ peak_index [ - 2 ]]) and ( rsi [ peak_index [ - 1 ]] < rsi [ peak_index [ - 2 ]] ): top_dev_distance = length - 1 - peak_index [ - 1 ] return top_dev_distance rsi_watermarks ( close , thresh = None ) \u00b6 \u7ed9\u5b9a\u4e00\u6bb5\u884c\u60c5\u6570\u636e\u548c\u7528\u4ee5\u68c0\u6d4b\u9876\u548c\u5e95\u7684\u9608\u503c\uff0c\u8fd4\u56de\u8be5\u6bb5\u884c\u60c5\u4e2d\uff0c\u8c37\u548c\u5cf0\u5904RSI\u5747\u503c\uff0c\u6700\u540e\u4e00\u4e2aRSI6\u503c\u3002 \u5176\u4e2dclose\u7684\u957f\u5ea6\u4e00\u822c\u4e0d\u5c0f\u4e8e60\uff0c\u4e0d\u5927\u4e8e120\u3002\u8fd4\u56de\u503c\u4e2d\uff0c\u4e00\u4e2a\u4e3alow_wartermark\uff08\u8c37\u5e95\u5904RSI\u503c\uff09\uff0c \u4e00\u4e2a\u4e3ahigh_wartermark\uff08\u9ad8\u5cf0\u5904RSI\u503c)\uff0c\u4e00\u4e2a\u4e3aRSI6\u7684\u6700\u540e\u4e00\u4e2a\u503c\uff0c\u7528\u4ee5\u5bf9\u6bd4\u524d\u4e24\u4e2a\u8b66\u6212\u503c\u3002 Parameters: Name Type Description Default close np.array \u5177\u6709\u65f6\u95f4\u5e8f\u5217\u7684\u6536\u76d8\u4ef7 required thresh Tuple[float, float]) None\u9002\u7528\u6240\u6709\u80a1\u7968\uff0c\u4e0d\u5fc5\u66f4\u6539\uff0c\u4e5f\u53ef\u81ea\u884c\u8bbe\u7f6e\u3002 None Returns: Type Description Tuple[float, float, float] \u8fd4\u56de\u6570\u7ec4[low_watermark, high_watermark\uff0c rsi[-1]], \u7b2c\u4e00\u4e2a\u4e3a\u6700\u8fd1\u4e24\u4e2a\u6700\u4f4e\u6536\u76d8\u4ef7\u7684RSI\u5747\u503c\uff0c \u7b2c\u4e8c\u4e2a\u4e3a\u6700\u8fd1\u4e24\u4e2a\u6700\u9ad8\u6536\u76d8\u4ef7\u7684RSI\u5747\u503c\u3002 \u82e5\u4f20\u5165\u6536\u76d8\u4ef7\u53ea\u6709\u4e00\u4e2a\u6700\u503c\uff0c\u53ea\u8fd4\u56de\u4e00\u4e2a\u3002\u6ca1\u6709\u6700\u503c\uff0c\u5219\u8fd4\u56deNone, \u7b2c\u4e09\u4e2a\u4e3a\u5b9e\u9645\u7684\u6700\u540eRSI6\u7684\u503c\u3002 Source code in omicron/talib/morph.py def rsi_watermarks ( close : np . 
array , thresh : Tuple [ float , float ] = None ) -> Tuple [ float , float , float ]: \"\"\"\u7ed9\u5b9a\u4e00\u6bb5\u884c\u60c5\u6570\u636e\u548c\u7528\u4ee5\u68c0\u6d4b\u9876\u548c\u5e95\u7684\u9608\u503c\uff0c\u8fd4\u56de\u8be5\u6bb5\u884c\u60c5\u4e2d\uff0c\u8c37\u548c\u5cf0\u5904RSI\u5747\u503c\uff0c\u6700\u540e\u4e00\u4e2aRSI6\u503c\u3002 \u5176\u4e2dclose\u7684\u957f\u5ea6\u4e00\u822c\u4e0d\u5c0f\u4e8e60\uff0c\u4e0d\u5927\u4e8e120\u3002\u8fd4\u56de\u503c\u4e2d\uff0c\u4e00\u4e2a\u4e3alow_wartermark\uff08\u8c37\u5e95\u5904RSI\u503c\uff09\uff0c \u4e00\u4e2a\u4e3ahigh_wartermark\uff08\u9ad8\u5cf0\u5904RSI\u503c)\uff0c\u4e00\u4e2a\u4e3aRSI6\u7684\u6700\u540e\u4e00\u4e2a\u503c\uff0c\u7528\u4ee5\u5bf9\u6bd4\u524d\u4e24\u4e2a\u8b66\u6212\u503c\u3002 Args: close (np.array): \u5177\u6709\u65f6\u95f4\u5e8f\u5217\u7684\u6536\u76d8\u4ef7 thresh (Tuple[float, float]) : None\u9002\u7528\u6240\u6709\u80a1\u7968\uff0c\u4e0d\u5fc5\u66f4\u6539\uff0c\u4e5f\u53ef\u81ea\u884c\u8bbe\u7f6e\u3002 Returns: \u8fd4\u56de\u6570\u7ec4[low_watermark, high_watermark\uff0c rsi[-1]], \u7b2c\u4e00\u4e2a\u4e3a\u6700\u8fd1\u4e24\u4e2a\u6700\u4f4e\u6536\u76d8\u4ef7\u7684RSI\u5747\u503c\uff0c \u7b2c\u4e8c\u4e2a\u4e3a\u6700\u8fd1\u4e24\u4e2a\u6700\u9ad8\u6536\u76d8\u4ef7\u7684RSI\u5747\u503c\u3002 \u82e5\u4f20\u5165\u6536\u76d8\u4ef7\u53ea\u6709\u4e00\u4e2a\u6700\u503c\uff0c\u53ea\u8fd4\u56de\u4e00\u4e2a\u3002\u6ca1\u6709\u6700\u503c\uff0c\u5219\u8fd4\u56deNone, \u7b2c\u4e09\u4e2a\u4e3a\u5b9e\u9645\u7684\u6700\u540eRSI6\u7684\u503c\u3002 \"\"\" assert len ( close ) >= 60 , \"must provide an array with at least 60 length!\" if thresh is None : std = np . std ( close [ - 59 :] / close [ - 60 : - 1 ] - 1 ) thresh = ( 2 * std , - 2 * std ) if close . dtype != np . float64 : close = close . astype ( np . float64 ) rsi = ta . RSI ( close , 6 ) pivots = peak_valley_pivots ( close , thresh [ 0 ], thresh [ 1 ]) pivots [ 0 ], pivots [ - 1 ] = 0 , 0 # \u6390\u5934\u53bb\u5c3e # \u5cf0\u503cRSI>70; \u8c37\u5904\u7684RSI<30; peaks_rsi_index = np . where (( rsi > 70 ) & ( pivots == 1 ))[ 0 ] valleys_rsi_index = np . where (( rsi < 30 ) & ( pivots == - 1 ))[ 0 ] if len ( peaks_rsi_index ) == 0 : high_watermark = None elif len ( peaks_rsi_index ) == 1 : high_watermark = rsi [ peaks_rsi_index [ 0 ]] else : # \u6709\u4e24\u4e2a\u4ee5\u4e0a\u7684\u5cf0\uff0c\u901a\u8fc7\u6700\u8fd1\u7684\u4e24\u4e2a\u5cf0\u5747\u503c\u6765\u786e\u5b9a\u8d70\u52bf high_watermark = np . nanmean ( rsi [ peaks_rsi_index [ - 2 :]]) if len ( valleys_rsi_index ) == 0 : low_watermark = None elif len ( valleys_rsi_index ) == 1 : low_watermark = rsi [ valleys_rsi_index [ 0 ]] else : # \u6709\u4e24\u4e2a\u4ee5\u4e0a\u7684\u5cf0\uff0c\u901a\u8fc7\u6700\u8fd1\u7684\u4e24\u4e2a\u5cf0\u6765\u786e\u5b9a\u8d70\u52bf low_watermark = np . nanmean ( rsi [ valleys_rsi_index [ - 2 :]]) return low_watermark , high_watermark , rsi [ - 1 ] support_resist_lines ( ts , upthres = None , downthres = None ) \u00b6 \u8ba1\u7b97\u65f6\u95f4\u5e8f\u5217\u7684\u652f\u6491\u7ebf\u548c\u963b\u529b\u7ebf \u4f7f\u7528\u6700\u8fd1\u7684\u4e24\u4e2a\u9ad8\u70b9\u8fde\u63a5\u6210\u9634\u529b\u7ebf\uff0c\u4e24\u4e2a\u4f4e\u70b9\u8fde\u63a5\u6210\u652f\u6491\u7ebf\u3002 Examples: 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 def show_support_resist_lines ( ts ): import plotly.graph_objects as go fig = go . Figure () support , resist , x_start = support_resist_lines ( ts , 0.03 , - 0.03 ) fig . add_trace ( go . Scatter ( x = np . arange ( len ( ts )), y = ts )) x = np . 
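The three-tuple returned by `rsi_watermarks` is meant to be compared against the latest RSI6 reading; the `*1.01` / `*0.99` tolerances below mirror the ones used inside `rsi_bottom_distance` and `rsi_top_distance` above. A sketch under the same `omicron.talib.morph` import assumption:

```python
import numpy as np
from omicron.talib.morph import rsi_watermarks  # path per the source listing above

np.random.seed(31)
close = np.cumprod(1 + np.random.randn(100) * 0.02) * 9.0  # 60 <= len <= 120 as documented

low_wm, high_wm, last_rsi = rsi_watermarks(close, thresh=None)

# either watermark may be None when no qualifying peak or valley exists in the window
if low_wm is not None and last_rsi <= low_wm * 1.01:
    print(f"RSI6 {last_rsi:.1f} is back near its recent low watermark {low_wm:.1f}")
if high_wm is not None and last_rsi >= high_wm * 0.99:
    print(f"RSI6 {last_rsi:.1f} is back near its recent high watermark {high_wm:.1f}")
```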
arange ( len ( ts ))[ x_start :] fig . add_trace ( go . Line ( x = x , y = support ( x ))) fig . add_trace ( go . Line ( x = x , y = resist ( x ))) fig . show () np . random . seed ( 1978 ) X = np . cumprod ( 1 + np . random . randn ( 100 ) * 0.01 ) show_support_resist_lines ( X ) the above code will show this Parameters: Name Type Description Default ts np.ndarray \u65f6\u95f4\u5e8f\u5217 required upthres float \u8bf7\u53c2\u8003 peaks_and_valleys None downthres float \u8bf7\u53c2\u8003 peaks_and_valleys None Returns: Type Description Tuple[Callable, Callable, numpy.ndarray] \u8fd4\u56de\u652f\u6491\u7ebf\u548c\u963b\u529b\u7ebf\u7684\u8ba1\u7b97\u51fd\u6570\u53ca\u8d77\u59cb\u70b9\u5750\u6807\uff0c\u5982\u679c\u6ca1\u6709\u652f\u6491\u7ebf\u6216\u963b\u529b\u7ebf\uff0c\u5219\u8fd4\u56deNone Source code in omicron/talib/morph.py def support_resist_lines ( ts : np . ndarray , upthres : float = None , downthres : float = None ) -> Tuple [ Callable , Callable , np . ndarray ]: \"\"\"\u8ba1\u7b97\u65f6\u95f4\u5e8f\u5217\u7684\u652f\u6491\u7ebf\u548c\u963b\u529b\u7ebf \u4f7f\u7528\u6700\u8fd1\u7684\u4e24\u4e2a\u9ad8\u70b9\u8fde\u63a5\u6210\u9634\u529b\u7ebf\uff0c\u4e24\u4e2a\u4f4e\u70b9\u8fde\u63a5\u6210\u652f\u6491\u7ebf\u3002 Examples: ```python def show_support_resist_lines(ts): import plotly.graph_objects as go fig = go.Figure() support, resist, x_start = support_resist_lines(ts, 0.03, -0.03) fig.add_trace(go.Scatter(x=np.arange(len(ts)), y=ts)) x = np.arange(len(ts))[x_start:] fig.add_trace(go.Line(x=x, y = support(x))) fig.add_trace(go.Line(x=x, y = resist(x))) fig.show() np.random.seed(1978) X = np.cumprod(1 + np.random.randn(100) * 0.01) show_support_resist_lines(X) ``` the above code will show this ![](https://images.jieyu.ai/images/202204/support_resist.png) Args: ts (np.ndarray): \u65f6\u95f4\u5e8f\u5217 upthres (float, optional): \u8bf7\u53c2\u8003[peaks_and_valleys][omicron.talib.morph.peaks_and_valleys] downthres (float, optional): \u8bf7\u53c2\u8003[peaks_and_valleys][omicron.talib.morph.peaks_and_valleys] Returns: \u8fd4\u56de\u652f\u6491\u7ebf\u548c\u963b\u529b\u7ebf\u7684\u8ba1\u7b97\u51fd\u6570\u53ca\u8d77\u59cb\u70b9\u5750\u6807\uff0c\u5982\u679c\u6ca1\u6709\u652f\u6491\u7ebf\u6216\u963b\u529b\u7ebf\uff0c\u5219\u8fd4\u56deNone \"\"\" if ts . dtype != np . float64 : ts = ts . astype ( np . float64 ) pivots = peaks_and_valleys ( ts , upthres , downthres ) pivots [ 0 ] = 0 pivots [ - 1 ] = 0 arg_max = np . argwhere ( pivots == 1 ) . flatten () arg_min = np . argwhere ( pivots == - 1 ) . flatten () resist = None support = None if len ( arg_max ) >= 2 : arg_max = arg_max [ - 2 :] y = ts [ arg_max ] coeff = np . polyfit ( arg_max , y , deg = 1 ) resist = np . poly1d ( coeff ) if len ( arg_min ) >= 2 : arg_min = arg_min [ - 2 :] y = ts [ arg_min ] coeff = np . polyfit ( arg_min , y , deg = 1 ) support = np . poly1d ( coeff ) return support , resist , np . 
min ([ * arg_min , * arg_max ]) valley_detect ( close , thresh = ( 0.05 , - 0.02 )) \u00b6 \u7ed9\u5b9a\u4e00\u6bb5\u884c\u60c5\u6570\u636e\u548c\u7528\u4ee5\u68c0\u6d4b\u8fd1\u671f\u5df2\u53d1\u751f\u53cd\u8f6c\u7684\u6700\u4f4e\u70b9\uff0c\u8fd4\u56de\u8be5\u6bb5\u884c\u60c5\u4e2d\uff0c\u6700\u4f4e\u70b9\u5230\u6700\u540e\u4e00\u4e2a\u6570\u636e\u7684\u8ddd\u79bb\u548c\u6536\u76ca\u7387\u6570\u7ec4\uff0c \u5982\u679c\u7ed9\u5b9a\u884c\u60c5\u4e2d\u672a\u627e\u5230\u6ee1\u8db3\u53c2\u6570\u7684\u6700\u4f4e\u70b9\uff0c\u5219\u8fd4\u56de\u4e24\u4e2a\u7a7a\u503c\u6570\u7ec4\u3002 \u5176\u4e2dbars\u7684\u957f\u5ea6\u4e00\u822c\u4e0d\u5c0f\u4e8e60\uff0c\u4e0d\u5927\u4e8e120\u3002\u6b64\u51fd\u6570\u91c7\u7528\u4e86zigzag\u4e2d\u7684\u8c37\u5cf0\u68c0\u6d4b\u65b9\u6cd5\uff0c\u5176\u4e2d\u53c2\u6570\u9ed8\u8ba4(0.05,-0.02), \u6b64\u53c2\u6570\u5bf9\u6240\u6709\u80a1\u7968\u6570\u636e\u90fd\u9002\u7528\u3002\u82e5\u6ee1\u8db3\u53c2\u6570\uff0c\u8fd4\u56de\u503c\u4e2d\uff0c\u8ddd\u79bb\u4e3a\u5927\u4e8e0\u7684\u6574\u6570\uff0c\u6536\u76ca\u7387\u662f0~1\u7684\u5c0f\u6570\u3002 Parameters: Name Type Description Default close np.ndarray \u5177\u6709\u65f6\u95f4\u5e8f\u5217\u7684\u6536\u76d8\u4ef7 required thresh Tuple[float, float]) \u8bf7\u53c2\u8003 peaks_and_valleys (0.05, -0.02) Returns: Type Description int \u8fd4\u56de\u8be5\u6bb5\u884c\u60c5\u4e2d\uff0c\u6700\u4f4e\u70b9\u5230\u6700\u540e\u4e00\u4e2a\u6570\u636e\u7684\u8ddd\u79bb\u548c\u6536\u76ca\u7387\u6570\u7ec4\uff0c \u5982\u679c\u7ed9\u5b9a\u884c\u60c5\u4e2d\u672a\u627e\u5230\u6ee1\u8db3\u53c2\u6570\u7684\u6700\u4f4e\u70b9\uff0c\u5219\u8fd4\u56de\u4e24\u4e2a\u7a7a\u503c\u6570\u7ec4\u3002 Source code in omicron/talib/morph.py def valley_detect ( close : np . ndarray , thresh : Tuple [ float , float ] = ( 0.05 , - 0.02 ) ) -> int : \"\"\"\u7ed9\u5b9a\u4e00\u6bb5\u884c\u60c5\u6570\u636e\u548c\u7528\u4ee5\u68c0\u6d4b\u8fd1\u671f\u5df2\u53d1\u751f\u53cd\u8f6c\u7684\u6700\u4f4e\u70b9\uff0c\u8fd4\u56de\u8be5\u6bb5\u884c\u60c5\u4e2d\uff0c\u6700\u4f4e\u70b9\u5230\u6700\u540e\u4e00\u4e2a\u6570\u636e\u7684\u8ddd\u79bb\u548c\u6536\u76ca\u7387\u6570\u7ec4\uff0c \u5982\u679c\u7ed9\u5b9a\u884c\u60c5\u4e2d\u672a\u627e\u5230\u6ee1\u8db3\u53c2\u6570\u7684\u6700\u4f4e\u70b9\uff0c\u5219\u8fd4\u56de\u4e24\u4e2a\u7a7a\u503c\u6570\u7ec4\u3002 \u5176\u4e2dbars\u7684\u957f\u5ea6\u4e00\u822c\u4e0d\u5c0f\u4e8e60\uff0c\u4e0d\u5927\u4e8e120\u3002\u6b64\u51fd\u6570\u91c7\u7528\u4e86zigzag\u4e2d\u7684\u8c37\u5cf0\u68c0\u6d4b\u65b9\u6cd5\uff0c\u5176\u4e2d\u53c2\u6570\u9ed8\u8ba4(0.05,-0.02), \u6b64\u53c2\u6570\u5bf9\u6240\u6709\u80a1\u7968\u6570\u636e\u90fd\u9002\u7528\u3002\u82e5\u6ee1\u8db3\u53c2\u6570\uff0c\u8fd4\u56de\u503c\u4e2d\uff0c\u8ddd\u79bb\u4e3a\u5927\u4e8e0\u7684\u6574\u6570\uff0c\u6536\u76ca\u7387\u662f0~1\u7684\u5c0f\u6570\u3002 Args: close (np.ndarray): \u5177\u6709\u65f6\u95f4\u5e8f\u5217\u7684\u6536\u76d8\u4ef7 thresh (Tuple[float, float]) : \u8bf7\u53c2\u8003[peaks_and_valleys][omicron.talib.morph.peaks_and_valleys] Returns: \u8fd4\u56de\u8be5\u6bb5\u884c\u60c5\u4e2d\uff0c\u6700\u4f4e\u70b9\u5230\u6700\u540e\u4e00\u4e2a\u6570\u636e\u7684\u8ddd\u79bb\u548c\u6536\u76ca\u7387\u6570\u7ec4\uff0c \u5982\u679c\u7ed9\u5b9a\u884c\u60c5\u4e2d\u672a\u627e\u5230\u6ee1\u8db3\u53c2\u6570\u7684\u6700\u4f4e\u70b9\uff0c\u5219\u8fd4\u56de\u4e24\u4e2a\u7a7a\u503c\u6570\u7ec4\u3002 \"\"\" assert len ( close ) >= 60 , \"must provide an array with at least 60 length!\" if close . dtype != np . float64 : close = close . astype ( np . 
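`valley_detect`, documented above, reports how long ago a confirmed reversal low occurred and how far price has recovered since. A minimal sketch, again assuming the `omicron.talib.morph` import path and synthetic data:

```python
import numpy as np
from omicron.talib.morph import valley_detect  # path per the source listing above

np.random.seed(3)
close = np.cumprod(1 + np.random.randn(80) * 0.02) * 18.0  # >= 60 bars

# default thresh=(0.05, -0.02) is documented as suitable for all stocks
distance, gain = valley_detect(close, thresh=(0.05, -0.02))

if distance is not None:
    print(f"reversal low {distance} bars ago, up {gain:.2%} since")
else:
    print("no confirmed reversal low in this window")
```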
float64 ) if thresh is None : std = np . std ( close [ - 59 :] / close [ - 60 : - 1 ] - 1 ) thresh = ( 2 * std , - 2 * std ) pivots = peak_valley_pivots ( close , thresh [ 0 ], thresh [ 1 ]) flags = pivots [ pivots != 0 ] increased = None lowest_distance = None if ( flags [ - 2 ] == - 1 ) and ( flags [ - 1 ] == 1 ): length = len ( pivots ) valley_index = np . where ( pivots == - 1 )[ 0 ] increased = ( close [ - 1 ] - close [ valley_index [ - 1 ]]) / close [ valley_index [ - 1 ]] lowest_distance = int ( length - 1 - valley_index [ - 1 ]) return lowest_distance , increased vcross ( f , g ) \u00b6 \u5224\u65ad\u5e8f\u5217f\u662f\u5426\u4e0eg\u5b58\u5728\u7c7b\u578bv\u578b\u7684\u76f8\u4ea4\u3002\u5373\u5b58\u5728\u4e24\u4e2a\u4ea4\u70b9\uff0c\u7b2c\u4e00\u4e2a\u4ea4\u70b9\u4e3a\u5411\u4e0b\u76f8\u4ea4\uff0c\u7b2c\u4e8c\u4e2a\u4ea4\u70b9\u4e3a\u5411\u4e0a \u76f8\u4ea4\u3002\u4e00\u822c\u53cd\u6620\u4e3a\u6d17\u76d8\u62c9\u5347\u7684\u7279\u5f81\u3002 Examples: >>> f = np . array ([ 3 * i ** 2 - 20 * i + 2 for i in range ( 10 )]) >>> g = np . array ([ i - 5 for i in range ( 10 )]) >>> flag , indices = vcross ( f , g ) >>> assert flag is True >>> assert indices [ 0 ] == 0 >>> assert indices [ 1 ] == 6 Parameters: Name Type Description Default f first sequence required g the second sequence required Returns: Type Description Tuple (flag, indices), \u5176\u4e2dflag\u53d6\u503c\u4e3aTrue\u65f6\uff0c\u5b58\u5728vcross\uff0cindices\u4e3a\u4ea4\u70b9\u7684\u7d22\u5f15\u3002 Source code in omicron/talib/morph.py def vcross ( f : np . array , g : np . array ) -> Tuple : \"\"\"\u5224\u65ad\u5e8f\u5217f\u662f\u5426\u4e0eg\u5b58\u5728\u7c7b\u578bv\u578b\u7684\u76f8\u4ea4\u3002\u5373\u5b58\u5728\u4e24\u4e2a\u4ea4\u70b9\uff0c\u7b2c\u4e00\u4e2a\u4ea4\u70b9\u4e3a\u5411\u4e0b\u76f8\u4ea4\uff0c\u7b2c\u4e8c\u4e2a\u4ea4\u70b9\u4e3a\u5411\u4e0a \u76f8\u4ea4\u3002\u4e00\u822c\u53cd\u6620\u4e3a\u6d17\u76d8\u62c9\u5347\u7684\u7279\u5f81\u3002 Examples: >>> f = np.array([ 3 * i ** 2 - 20 * i + 2 for i in range(10)]) >>> g = np.array([ i - 5 for i in range(10)]) >>> flag, indices = vcross(f, g) >>> assert flag is True >>> assert indices[0] == 0 >>> assert indices[1] == 6 Args: f: first sequence g: the second sequence Returns: (flag, indices), \u5176\u4e2dflag\u53d6\u503c\u4e3aTrue\u65f6\uff0c\u5b58\u5728vcross\uff0cindices\u4e3a\u4ea4\u70b9\u7684\u7d22\u5f15\u3002 \"\"\" indices = np . argwhere ( np . diff ( np . sign ( f - g ))) . flatten () if len ( indices ) == 2 : idx0 , idx1 = indices if f [ idx0 ] > g [ idx0 ] and f [ idx1 ] < g [ idx1 ]: return True , ( idx0 , idx1 ) return False , ( None , None )","title":"talib"},{"location":"api/talib/#omicron.talib.core","text":"","title":"core"},{"location":"api/talib/#omicron.talib.core.angle","text":"\u6c42\u65f6\u95f4\u5e8f\u5217 ts \u62df\u5408\u76f4\u7ebf\u76f8\u5bf9\u4e8e x \u8f74\u7684\u5939\u89d2\u7684\u4f59\u5f26\u503c \u672c\u51fd\u6570\u53ef\u4ee5\u7528\u6765\u5224\u65ad\u65f6\u95f4\u5e8f\u5217\u7684\u589e\u957f\u8d8b\u52bf\u3002\u5f53 angle \u5904\u4e8e[-1, 0]\u65f6\uff0c\u8d8a\u9760\u8fd10\uff0c\u4e0b\u964d\u8d8a\u5feb\uff1b\u5f53 angle \u5904\u4e8e[0, 1]\u65f6\uff0c\u8d8a\u63a5\u8fd10\uff0c\u4e0a\u5347\u8d8a\u5feb\u3002 \u5982\u679c ts \u65e0\u6cd5\u5f88\u597d\u5730\u62df\u5408\u4e3a\u76f4\u7ebf\uff0c\u5219\u8fd4\u56de[float, None] Examples: >>> ts = np . array ([ i for i in range ( 5 )]) >>> round ( angle ( ts )[ 1 ], 3 ) # degree: 45, rad: pi/2 0.707 >>> ts = np . array ([ np . 
sqrt ( 3 ) / 3 * i for i in range ( 10 )]) >>> round ( angle ( ts )[ 1 ], 3 ) # degree: 30, rad: pi/6 0.866 >>> ts = np . array ([ - np . sqrt ( 3 ) / 3 * i for i in range ( 7 )]) >>> round ( angle ( ts )[ 1 ], 3 ) # degree: 150, rad: 5*pi/6 - 0.866 Parameters: Name Type Description Default ts required Returns: Type Description Tuple[float, float] \u8fd4\u56de (error, consine(theta))\uff0c\u5373\u62df\u5408\u8bef\u5dee\u548c\u5939\u89d2\u4f59\u5f26\u503c\u3002 Source code in omicron/talib/core.py def angle ( ts , threshold = 0.01 , loss_func = \"re\" ) -> Tuple [ float , float ]: \"\"\"\u6c42\u65f6\u95f4\u5e8f\u5217`ts`\u62df\u5408\u76f4\u7ebf\u76f8\u5bf9\u4e8e`x`\u8f74\u7684\u5939\u89d2\u7684\u4f59\u5f26\u503c \u672c\u51fd\u6570\u53ef\u4ee5\u7528\u6765\u5224\u65ad\u65f6\u95f4\u5e8f\u5217\u7684\u589e\u957f\u8d8b\u52bf\u3002\u5f53`angle`\u5904\u4e8e[-1, 0]\u65f6\uff0c\u8d8a\u9760\u8fd10\uff0c\u4e0b\u964d\u8d8a\u5feb\uff1b\u5f53`angle` \u5904\u4e8e[0, 1]\u65f6\uff0c\u8d8a\u63a5\u8fd10\uff0c\u4e0a\u5347\u8d8a\u5feb\u3002 \u5982\u679c`ts`\u65e0\u6cd5\u5f88\u597d\u5730\u62df\u5408\u4e3a\u76f4\u7ebf\uff0c\u5219\u8fd4\u56de[float, None] Examples: >>> ts = np.array([ i for i in range(5)]) >>> round(angle(ts)[1], 3) # degree: 45, rad: pi/2 0.707 >>> ts = np.array([ np.sqrt(3) / 3 * i for i in range(10)]) >>> round(angle(ts)[1],3) # degree: 30, rad: pi/6 0.866 >>> ts = np.array([ -np.sqrt(3) / 3 * i for i in range(7)]) >>> round(angle(ts)[1], 3) # degree: 150, rad: 5*pi/6 -0.866 Args: ts: Returns: \u8fd4\u56de (error, consine(theta))\uff0c\u5373\u62df\u5408\u8bef\u5dee\u548c\u5939\u89d2\u4f59\u5f26\u503c\u3002 \"\"\" err , ( a , b ) = polyfit ( ts , deg = 1 , loss_func = loss_func ) if err > threshold : return ( err , None ) v = np . array ([ 1 , a + b ]) vx = np . array ([ 1 , 0 ]) return err , copysign ( np . dot ( v , vx ) / ( norm ( v ) * norm ( vx )), a )","title":"angle()"},{"location":"api/talib/#omicron.talib.core.clustering","text":"\u5c06\u6570\u7ec4 numbers \u5212\u5206\u4e3a n \u4e2a\u7c07 \u8fd4\u56de\u503c\u4e3a\u4e00\u4e2aList, \u6bcf\u4e00\u4e2a\u5143\u7d20\u4e3a\u4e00\u4e2a\u5217\u8868\uff0c\u5206\u522b\u4e3a\u7c07\u7684\u8d77\u59cb\u70b9\u548c\u957f\u5ea6\u3002 Examples: >>> numbers = np . array ([ 1 , 1 , 1 , 2 , 4 , 6 , 8 , 7 , 4 , 5 , 6 ]) >>> clustering ( numbers , 2 ) [( 0 , 4 ), ( 4 , 7 )] Returns: Type Description List[Tuple[int, int]] \u5212\u5206\u540e\u7684\u7c07\u5217\u8868\u3002 Source code in omicron/talib/core.py def clustering ( numbers : np . ndarray , n : int ) -> List [ Tuple [ int , int ]]: \"\"\"\u5c06\u6570\u7ec4`numbers`\u5212\u5206\u4e3a`n`\u4e2a\u7c07 \u8fd4\u56de\u503c\u4e3a\u4e00\u4e2aList, \u6bcf\u4e00\u4e2a\u5143\u7d20\u4e3a\u4e00\u4e2a\u5217\u8868\uff0c\u5206\u522b\u4e3a\u7c07\u7684\u8d77\u59cb\u70b9\u548c\u957f\u5ea6\u3002 Examples: >>> numbers = np.array([1,1,1,2,4,6,8,7,4,5,6]) >>> clustering(numbers, 2) [(0, 4), (4, 7)] Returns: \u5212\u5206\u540e\u7684\u7c07\u5217\u8868\u3002 \"\"\" result = ckwrap . cksegs ( numbers , n ) clusters = [] for pos , size in zip ( result . centers , result . sizes ): clusters . append (( int ( pos - size // 2 - 1 ), int ( size ))) return clusters","title":"clustering()"},{"location":"api/talib/#omicron.talib.core.exp_moving_average","text":"Numpy implementation of EMA Source code in omicron/talib/core.py def exp_moving_average ( values , window ): \"\"\"Numpy implementation of EMA\"\"\" weights = np . exp ( np . linspace ( - 1.0 , 0.0 , window )) weights /= weights . sum () a = np . 
convolve ( values , weights , mode = \"full\" )[: len ( values )] a [: window ] = a [ window ] return a","title":"exp_moving_average()"},{"location":"api/talib/#omicron.talib.core.mean_absolute_error","text":"\u8fd4\u56de\u9884\u6d4b\u5e8f\u5217\u76f8\u5bf9\u4e8e\u771f\u503c\u5e8f\u5217\u7684\u5e73\u5747\u7edd\u5bf9\u503c\u5dee \u4e24\u4e2a\u5e8f\u5217\u5e94\u8be5\u5177\u6709\u76f8\u540c\u7684\u957f\u5ea6\u3002\u5982\u679c\u5b58\u5728nan\uff0c\u5219nan\u7684\u503c\u4e0d\u8ba1\u5165\u5e73\u5747\u503c\u3002 Examples: >>> y = np . arange ( 5 ) >>> y_hat = np . arange ( 5 ) >>> y_hat [ 4 ] = 0 >>> mean_absolute_error ( y , y ) 0.0 >>> mean_absolute_error ( y , y_hat ) 0.8 Parameters: Name Type Description Default y np.array \u771f\u503c\u5e8f\u5217 required y_hat \u6bd4\u8f83\u5e8f\u5217 required Returns: Type Description float \u5e73\u5747\u7edd\u5bf9\u503c\u5dee Source code in omicron/talib/core.py def mean_absolute_error ( y : np . array , y_hat : np . array ) -> float : \"\"\"\u8fd4\u56de\u9884\u6d4b\u5e8f\u5217\u76f8\u5bf9\u4e8e\u771f\u503c\u5e8f\u5217\u7684\u5e73\u5747\u7edd\u5bf9\u503c\u5dee \u4e24\u4e2a\u5e8f\u5217\u5e94\u8be5\u5177\u6709\u76f8\u540c\u7684\u957f\u5ea6\u3002\u5982\u679c\u5b58\u5728nan\uff0c\u5219nan\u7684\u503c\u4e0d\u8ba1\u5165\u5e73\u5747\u503c\u3002 Examples: >>> y = np.arange(5) >>> y_hat = np.arange(5) >>> y_hat[4] = 0 >>> mean_absolute_error(y, y) 0.0 >>> mean_absolute_error(y, y_hat) 0.8 Args: y (np.array): \u771f\u503c\u5e8f\u5217 y_hat: \u6bd4\u8f83\u5e8f\u5217 Returns: float: \u5e73\u5747\u7edd\u5bf9\u503c\u5dee \"\"\" return nanmean ( np . abs ( y - y_hat ))","title":"mean_absolute_error()"},{"location":"api/talib/#omicron.talib.core.moving_average","text":"\u751f\u6210ts\u5e8f\u5217\u7684\u79fb\u52a8\u5e73\u5747\u503c Examples: >>> ts = np . arange ( 7 ) >>> moving_average ( ts , 5 ) array ([ nan , nan , nan , nan , 2. , 3. , 4. ]) Parameters: Name Type Description Default ts Sequence the input array required win int the window size required padding if True, then the return will be equal length as input, padding with np.NaN at the beginning True Returns: Type Description ndarray The moving mean of the input array along the specified axis. The output has the same shape as the input. Source code in omicron/talib/core.py def moving_average ( ts : Sequence , win : int , padding = True ) -> np . ndarray : \"\"\"\u751f\u6210ts\u5e8f\u5217\u7684\u79fb\u52a8\u5e73\u5747\u503c Examples: >>> ts = np.arange(7) >>> moving_average(ts, 5) array([nan, nan, nan, nan, 2., 3., 4.]) Args: ts (Sequence): the input array win (int): the window size padding: if True, then the return will be equal length as input, padding with np.NaN at the beginning Returns: The moving mean of the input array along the specified axis. The output has the same shape as the input. 
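`exp_moving_average` and `moving_average`, both documented above with source in `omicron/talib/core.py`, differ mainly in how quickly they react to recent data. A short comparison sketch, assuming both are importable from `omicron.talib.core`:

```python
import numpy as np
from omicron.talib.core import exp_moving_average, moving_average  # paths per the source listings

close = np.array([10.0, 10.2, 10.1, 10.4, 10.6, 10.5, 10.9, 11.2, 11.0, 11.3])

sma = moving_average(close, 5)      # NaN-padded at the head, same length as input
ema = exp_moving_average(close, 5)  # exponentially weighted, also same length

# the EMA tracks the late up-move more closely than the simple mean
print(np.round(sma, 3))
print(np.round(ema, 3))
```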
\"\"\" ma = move_mean ( ts , win ) if padding : return ma else : return ma [ win - 1 :]","title":"moving_average()"},{"location":"api/talib/#omicron.talib.core.normalize","text":"\u5bf9\u6570\u636e\u8fdb\u884c\u89c4\u8303\u5316\u5904\u7406\u3002 \u5982\u679cscaler\u4e3amaxabs\uff0c\u5219X\u7684\u5404\u5143\u7d20\u88ab\u538b\u7f29\u5230[-1,1]\u4e4b\u95f4 \u5982\u679cscaler\u4e3aunit_vector\uff0c\u5219\u5c06X\u7684\u5404\u5143\u7d20\u538b\u7f29\u5230\u5355\u4f4d\u8303\u6570 \u5982\u679cscaler\u4e3aminmax,\u5219X\u7684\u5404\u5143\u7d20\u88ab\u538b\u7f29\u5230[0,1]\u4e4b\u95f4 \u5982\u679cscaler\u4e3astandard,\u5219X\u7684\u5404\u5143\u7d20\u88ab\u538b\u7f29\u5230\u5355\u4f4d\u65b9\u5dee\u4e4b\u95f4\uff0c\u4e14\u5747\u503c\u4e3a\u96f6\u3002 \u53c2\u8003 sklearn Examples: >>> X = [[ 1. , - 1. , 2. ], ... [ 2. , 0. , 0. ], ... [ 0. , 1. , - 1. ]] >>> expected = [[ 0.4082 , - 0.4082 , 0.8165 ], ... [ 1. , 0. , 0. ], ... [ 0. , 0.7071 , - 0.7071 ]] >>> X_hat = normalize ( X , scaler = 'unit_vector' ) >>> np . testing . assert_array_almost_equal ( expected , X_hat , decimal = 4 ) >>> expected = [[ 0.5 , - 1. , 1. ], ... [ 1. , 0. , 0. ], ... [ 0. , 1. , - 0.5 ]] >>> X_hat = normalize ( X , scaler = 'maxabs' ) >>> np . testing . assert_array_almost_equal ( expected , X_hat , decimal = 2 ) >>> expected = [[ 0.5 , 0. , 1. ], ... [ 1. , 0.5 , 0.33333333 ], ... [ 0. , 1. , 0. ]] >>> X_hat = normalize ( X , scaler = 'minmax' ) >>> np . testing . assert_array_almost_equal ( expected , X_hat , decimal = 3 ) >>> X = [[ 0 , 0 ], ... [ 0 , 0 ], ... [ 1 , 1 ], ... [ 1 , 1 ]] >>> expected = [[ - 1. , - 1. ], ... [ - 1. , - 1. ], ... [ 1. , 1. ], ... [ 1. , 1. ]] >>> X_hat = normalize ( X , scaler = 'standard' ) >>> np . testing . assert_array_almost_equal ( expected , X_hat , decimal = 3 ) Parameters: Name Type Description Default X 2D array required scaler str [description]. Defaults to 'maxabs_scale'. 'maxabs' Source code in omicron/talib/core.py def normalize ( X , scaler = \"maxabs\" ): \"\"\"\u5bf9\u6570\u636e\u8fdb\u884c\u89c4\u8303\u5316\u5904\u7406\u3002 \u5982\u679cscaler\u4e3amaxabs\uff0c\u5219X\u7684\u5404\u5143\u7d20\u88ab\u538b\u7f29\u5230[-1,1]\u4e4b\u95f4 \u5982\u679cscaler\u4e3aunit_vector\uff0c\u5219\u5c06X\u7684\u5404\u5143\u7d20\u538b\u7f29\u5230\u5355\u4f4d\u8303\u6570 \u5982\u679cscaler\u4e3aminmax,\u5219X\u7684\u5404\u5143\u7d20\u88ab\u538b\u7f29\u5230[0,1]\u4e4b\u95f4 \u5982\u679cscaler\u4e3astandard,\u5219X\u7684\u5404\u5143\u7d20\u88ab\u538b\u7f29\u5230\u5355\u4f4d\u65b9\u5dee\u4e4b\u95f4\uff0c\u4e14\u5747\u503c\u4e3a\u96f6\u3002 \u53c2\u8003 [sklearn] [sklearn]: https://scikit-learn.org/stable/auto_examples/preprocessing/plot_all_scaling.html#results Examples: >>> X = [[ 1., -1., 2.], ... [ 2., 0., 0.], ... [ 0., 1., -1.]] >>> expected = [[ 0.4082, -0.4082, 0.8165], ... [ 1., 0., 0.], ... [ 0., 0.7071, -0.7071]] >>> X_hat = normalize(X, scaler='unit_vector') >>> np.testing.assert_array_almost_equal(expected, X_hat, decimal=4) >>> expected = [[0.5, -1., 1.], ... [1., 0., 0.], ... [0., 1., -0.5]] >>> X_hat = normalize(X, scaler='maxabs') >>> np.testing.assert_array_almost_equal(expected, X_hat, decimal = 2) >>> expected = [[0.5 , 0. , 1. ], ... [1. , 0.5 , 0.33333333], ... [0. , 1. , 0. ]] >>> X_hat = normalize(X, scaler='minmax') >>> np.testing.assert_array_almost_equal(expected, X_hat, decimal= 3) >>> X = [[0, 0], ... [0, 0], ... [1, 1], ... [1, 1]] >>> expected = [[-1., -1.], ... [-1., -1.], ... [ 1., 1.], ... 
[ 1., 1.]] >>> X_hat = normalize(X, scaler='standard') >>> np.testing.assert_array_almost_equal(expected, X_hat, decimal = 3) Args: X (2D array): scaler (str, optional): [description]. Defaults to 'maxabs_scale'. \"\"\" if scaler == \"maxabs\" : return MaxAbsScaler () . fit_transform ( X ) elif scaler == \"unit_vector\" : return sklearn . preprocessing . normalize ( X , norm = \"l2\" ) elif scaler == \"minmax\" : return minmax_scale ( X ) elif scaler == \"standard\" : return StandardScaler () . fit_transform ( X )","title":"normalize()"},{"location":"api/talib/#omicron.talib.core.pct_error","text":"\u76f8\u5bf9\u4e8e\u5e8f\u5217\u7b97\u672f\u5747\u503c\u7684\u8bef\u5dee\u503c Examples: >>> y = np . arange ( 5 ) >>> y_hat = np . arange ( 5 ) >>> y_hat [ 4 ] = 0 >>> pct_error ( y , y_hat ) 0.4 Parameters: Name Type Description Default y np.array [description] required y_hat np.array [description] required Returns: Type Description float [description] Source code in omicron/talib/core.py def pct_error ( y : np . array , y_hat : np . array ) -> float : \"\"\"\u76f8\u5bf9\u4e8e\u5e8f\u5217\u7b97\u672f\u5747\u503c\u7684\u8bef\u5dee\u503c Examples: >>> y = np.arange(5) >>> y_hat = np.arange(5) >>> y_hat[4] = 0 >>> pct_error(y, y_hat) 0.4 Args: y (np.array): [description] y_hat (np.array): [description] Returns: float: [description] \"\"\" mae = mean_absolute_error ( y , y_hat ) return mae / nanmean ( np . abs ( y ))","title":"pct_error()"},{"location":"api/talib/#omicron.talib.core.polyfit","text":"\u5bf9\u7ed9\u5b9a\u7684\u65f6\u95f4\u5e8f\u5217\u8fdb\u884c\u76f4\u7ebf/\u4e8c\u6b21\u66f2\u7ebf\u62df\u5408\u3002 \u4e8c\u6b21\u66f2\u7ebf\u53ef\u4ee5\u62df\u5408\u5230\u53cd\u751f\u53cd\u8f6c\u7684\u884c\u60c5\uff0c\u5982\u5706\u5f27\u5e95\u3001\u5706\u5f27\u9876\uff1b\u4e5f\u53ef\u4ee5\u62df\u5408\u5230\u4e0a\u8ff0\u8d8b\u52bf\u4e2d\u7684\u5355\u8fb9\u8d70\u52bf\uff0c\u5373\u5176\u4e2d\u4e00\u6bb5\u66f2\u7ebf\u3002\u5bf9\u4e8e\u5982\u957f\u671f\u5747\u7ebf\uff0c\u5728\u4e00\u6bb5\u65f6\u95f4\u5185\u8d70\u52bf\u53ef\u80fd\u5448\u73b0\u4e3a\u4e00\u6761\u76f4\u7ebf\uff0c\u6545\u4e5f\u53ef\u7528\u6b64\u51fd\u6570\u8fdb\u884c\u76f4\u7ebf\u62df\u5408\u3002 \u4e3a\u4fbf\u4e8e\u5728\u4e0d\u540c\u54c1\u79cd\u3001\u4e0d\u540c\u7684\u65f6\u95f4\u4e4b\u95f4\u5bf9\u8bef\u5dee\u3001\u7cfb\u6570\u8fdb\u884c\u6bd4\u8f83\uff0c\u8bf7\u4e8b\u5148\u5bf9ts\u8fdb\u884c\u5f52\u4e00\u5316\u3002 \u5982\u679c\u9047\u5230\u65e0\u6cd5\u62df\u5408\u7684\u60c5\u51b5\uff08\u5f02\u5e38\uff09\uff0c\u5c06\u8fd4\u56de\u4e00\u4e2a\u975e\u5e38\u5927\u7684\u8bef\u5dee\uff0c\u5e76\u5c06\u5176\u5b83\u9879\u7f6e\u4e3anp.nan Examples: >>> ts = [ i for i in range ( 5 )] >>> err , ( a , b ) = polyfit ( ts , deg = 1 ) >>> print ( round ( err , 3 ), round ( a , 1 )) 0.0 1.0 Parameters: Name Type Description Default ts Sequence \u5f85\u62df\u5408\u7684\u65f6\u95f4\u5e8f\u5217 required deg int \u5982\u679c\u8981\u8fdb\u884c\u76f4\u7ebf\u62df\u5408\uff0c\u53d61\uff1b\u4e8c\u6b21\u66f2\u7ebf\u62df\u5408\u53d62. 
Defaults to 2 2 loss_func str \u8bef\u5dee\u8ba1\u7b97\u65b9\u6cd5\uff0c\u53d6\u503c\u4e3a mae , rmse , mse \u6216 re \u3002Defaults to re (relative_error) 're' Returns: Type Description [Tuple] \u5982\u679c\u4e3a\u76f4\u7ebf\u62df\u5408\uff0c\u8fd4\u56de\u8bef\u5dee\uff0c(a,b)(\u4e00\u6b21\u9879\u7cfb\u6570\u548c\u5e38\u6570)\u3002\u5982\u679c\u4e3a\u4e8c\u6b21\u66f2\u7ebf\u62df\u5408\uff0c\u8fd4\u56de \u8bef\u5dee, (a,b,c)(\u4e8c\u6b21\u9879\u3001\u4e00\u6b21\u9879\u548c\u5e38\u91cf\uff09, (vert_x, vert_y)(\u9876\u70b9\u5904\u7684index\uff0c\u9876\u70b9\u503c) Source code in omicron/talib/core.py def polyfit ( ts : Sequence , deg : int = 2 , loss_func = \"re\" ) -> Tuple : \"\"\"\u5bf9\u7ed9\u5b9a\u7684\u65f6\u95f4\u5e8f\u5217\u8fdb\u884c\u76f4\u7ebf/\u4e8c\u6b21\u66f2\u7ebf\u62df\u5408\u3002 \u4e8c\u6b21\u66f2\u7ebf\u53ef\u4ee5\u62df\u5408\u5230\u53cd\u751f\u53cd\u8f6c\u7684\u884c\u60c5\uff0c\u5982\u5706\u5f27\u5e95\u3001\u5706\u5f27\u9876\uff1b\u4e5f\u53ef\u4ee5\u62df\u5408\u5230\u4e0a\u8ff0\u8d8b\u52bf\u4e2d\u7684\u5355\u8fb9\u8d70\u52bf\uff0c\u5373\u5176\u4e2d\u4e00\u6bb5\u66f2\u7ebf\u3002\u5bf9\u4e8e\u5982\u957f\u671f\u5747\u7ebf\uff0c\u5728\u4e00\u6bb5\u65f6\u95f4\u5185\u8d70\u52bf\u53ef\u80fd\u5448\u73b0\u4e3a\u4e00\u6761\u76f4\u7ebf\uff0c\u6545\u4e5f\u53ef\u7528\u6b64\u51fd\u6570\u8fdb\u884c\u76f4\u7ebf\u62df\u5408\u3002 \u4e3a\u4fbf\u4e8e\u5728\u4e0d\u540c\u54c1\u79cd\u3001\u4e0d\u540c\u7684\u65f6\u95f4\u4e4b\u95f4\u5bf9\u8bef\u5dee\u3001\u7cfb\u6570\u8fdb\u884c\u6bd4\u8f83\uff0c\u8bf7\u4e8b\u5148\u5bf9ts\u8fdb\u884c\u5f52\u4e00\u5316\u3002 \u5982\u679c\u9047\u5230\u65e0\u6cd5\u62df\u5408\u7684\u60c5\u51b5\uff08\u5f02\u5e38\uff09\uff0c\u5c06\u8fd4\u56de\u4e00\u4e2a\u975e\u5e38\u5927\u7684\u8bef\u5dee\uff0c\u5e76\u5c06\u5176\u5b83\u9879\u7f6e\u4e3anp.nan Examples: >>> ts = [i for i in range(5)] >>> err, (a, b) = polyfit(ts, deg=1) >>> print(round(err, 3), round(a, 1)) 0.0 1.0 Args: ts (Sequence): \u5f85\u62df\u5408\u7684\u65f6\u95f4\u5e8f\u5217 deg (int): \u5982\u679c\u8981\u8fdb\u884c\u76f4\u7ebf\u62df\u5408\uff0c\u53d61\uff1b\u4e8c\u6b21\u66f2\u7ebf\u62df\u5408\u53d62. Defaults to 2 loss_func (str): \u8bef\u5dee\u8ba1\u7b97\u65b9\u6cd5\uff0c\u53d6\u503c\u4e3a`mae`, `rmse`,`mse` \u6216`re`\u3002Defaults to `re` (relative_error) Returns: [Tuple]: \u5982\u679c\u4e3a\u76f4\u7ebf\u62df\u5408\uff0c\u8fd4\u56de\u8bef\u5dee\uff0c(a,b)(\u4e00\u6b21\u9879\u7cfb\u6570\u548c\u5e38\u6570)\u3002\u5982\u679c\u4e3a\u4e8c\u6b21\u66f2\u7ebf\u62df\u5408\uff0c\u8fd4\u56de \u8bef\u5dee, (a,b,c)(\u4e8c\u6b21\u9879\u3001\u4e00\u6b21\u9879\u548c\u5e38\u91cf\uff09, (vert_x, vert_y)(\u9876\u70b9\u5904\u7684index\uff0c\u9876\u70b9\u503c) \"\"\" if deg not in ( 1 , 2 ): raise ValueError ( \"deg must be 1 or 2\" ) try : if any ( np . isnan ( ts )): raise ValueError ( \"ts contains nan\" ) x = np . array ( list ( range ( len ( ts )))) z = np . polyfit ( x , ts , deg = deg ) p = np . poly1d ( z ) ts_hat = np . array ([ p ( xi ) for xi in x ]) if loss_func == \"mse\" : error = np . mean ( np . square ( ts - ts_hat )) elif loss_func == \"rmse\" : error = np . sqrt ( np . mean ( np . 
square ( ts - ts_hat ))) elif loss_func == \"mae\" : error = mean_absolute_error ( ts , ts_hat ) else : # defaults to relative error error = pct_error ( ts , ts_hat ) if deg == 2 : a , b , c = z [ 0 ], z [ 1 ], z [ 2 ] axis_x = - b / ( 2 * a ) if a != 0 : axis_y = ( 4 * a * c - b * b ) / ( 4 * a ) else : axis_y = None return error , z , ( axis_x , axis_y ) elif deg == 1 : return error , z except Exception : error = 1e9 if deg == 1 : return error , ( np . nan , np . nan ) else : return error , ( np . nan , np . nan , np . nan ), ( np . nan , np . nan )","title":"polyfit()"},{"location":"api/talib/#omicron.talib.core.slope","text":"\u6c42ts\u8868\u793a\u7684\u76f4\u7ebf\uff08\u5982\u679c\u80fd\u62df\u5408\u6210\u76f4\u7ebf\u7684\u8bdd\uff09\u7684\u659c\u7387 Parameters: Name Type Description Default ts np.array [description] required loss_func str [description]. Defaults to 're'. 're' Source code in omicron/talib/core.py def slope ( ts : np . array , loss_func = \"re\" ): \"\"\"\u6c42ts\u8868\u793a\u7684\u76f4\u7ebf\uff08\u5982\u679c\u80fd\u62df\u5408\u6210\u76f4\u7ebf\u7684\u8bdd\uff09\u7684\u659c\u7387 Args: ts (np.array): [description] loss_func (str, optional): [description]. Defaults to 're'. \"\"\" err , ( a , b ) = polyfit ( ts , deg = 1 , loss_func = loss_func ) return err , a","title":"slope()"},{"location":"api/talib/#omicron.talib.core.smooth","text":"\u5e73\u6ed1\u5e8f\u5217ts\uff0c\u4f7f\u7528\u7a97\u53e3\u5927\u5c0f\u4e3awin\u7684\u5e73\u6ed1\u6a21\u578b\uff0c\u9ed8\u8ba4\u4f7f\u7528\u7ebf\u6027\u6a21\u578b \u63d0\u4f9b\u672c\u51fd\u6570\u4e3b\u8981\u57fa\u4e8e\u8fd9\u6837\u7684\u8003\u8651\uff1a omicron\u7684\u4f7f\u7528\u8005\u53ef\u80fd\u5e76\u4e0d\u719f\u6089\u4fe1\u53f7\u5904\u7406\u7684\u6982\u5ff5\uff0c\u8fd9\u91cc\u76f8\u5f53\u4e8e\u63d0\u4f9b\u4e86\u76f8\u5173\u529f\u80fd\u7684\u4e00\u4e2a\u5165\u53e3\u3002 Parameters: Name Type Description Default ts np.array [description] required win int [description] required poly_order int [description]. Defaults to 1. 1 Source code in omicron/talib/core.py def smooth ( ts : np . array , win : int , poly_order = 1 , mode = \"interp\" ): \"\"\"\u5e73\u6ed1\u5e8f\u5217ts\uff0c\u4f7f\u7528\u7a97\u53e3\u5927\u5c0f\u4e3awin\u7684\u5e73\u6ed1\u6a21\u578b\uff0c\u9ed8\u8ba4\u4f7f\u7528\u7ebf\u6027\u6a21\u578b \u63d0\u4f9b\u672c\u51fd\u6570\u4e3b\u8981\u57fa\u4e8e\u8fd9\u6837\u7684\u8003\u8651\uff1a omicron\u7684\u4f7f\u7528\u8005\u53ef\u80fd\u5e76\u4e0d\u719f\u6089\u4fe1\u53f7\u5904\u7406\u7684\u6982\u5ff5\uff0c\u8fd9\u91cc\u76f8\u5f53\u4e8e\u63d0\u4f9b\u4e86\u76f8\u5173\u529f\u80fd\u7684\u4e00\u4e2a\u5165\u53e3\u3002 Args: ts (np.array): [description] win (int): [description] poly_order (int, optional): [description]. Defaults to 1. \"\"\" return savgol_filter ( ts , win , poly_order , mode = mode )","title":"smooth()"},{"location":"api/talib/#omicron.talib.core.weighted_moving_average","text":"\u8ba1\u7b97\u52a0\u6743\u79fb\u52a8\u5e73\u5747 Parameters: Name Type Description Default ts np.array [description] required win int [description] required Returns: Type Description np.array [description] Source code in omicron/talib/core.py def weighted_moving_average ( ts : np . array , win : int ) -> np . array : \"\"\"\u8ba1\u7b97\u52a0\u6743\u79fb\u52a8\u5e73\u5747 Args: ts (np.array): [description] win (int): [description] Returns: np.array: [description] \"\"\" w = [ 2 * ( i + 1 ) / ( win * ( win + 1 )) for i in range ( win )] return np . 
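As the `polyfit` docstring above recommends, normalizing the series first makes the fitted error and slope comparable across instruments and time frames. A trend-reading sketch combining `moving_average`, `polyfit` and `slope`, assuming the `omicron.talib.core` import path:

```python
import numpy as np
from omicron.talib.core import moving_average, polyfit, slope  # paths per the source listings

np.random.seed(5)
close = np.cumprod(1 + np.random.randn(60) * 0.01) * 20.0

# last 20 points of the 20-bar MA, normalized so coefficients are comparable
ma20 = moving_average(close, 20)[-20:]
ts = ma20 / ma20[0]

err, (a, b) = polyfit(ts, deg=1)  # straight-line fit: slope a, intercept b
if err < 0.01:
    print(f"MA20 trend slope per bar: {a:.4%}")
else:
    print("MA20 does not fit a straight line well here")

err2, a2 = slope(ts)  # slope() is the thin wrapper returning (err, a) directly
```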
convolve ( ts , w , \"valid\" )","title":"weighted_moving_average()"},{"location":"api/talib/#omicron.talib.morph","text":"\u5f62\u6001\u68c0\u6d4b\u76f8\u5173\u65b9\u6cd5","title":"morph"},{"location":"api/talib/#omicron.talib.morph.BreakoutFlag","text":"An enumeration. Source code in omicron/talib/morph.py class BreakoutFlag ( IntEnum ): UP = 1 DOWN = - 1 NONE = 0","title":"BreakoutFlag"},{"location":"api/talib/#omicron.talib.morph.CrossFlag","text":"An enumeration. Source code in omicron/talib/morph.py class CrossFlag ( IntEnum ): UPCROSS = 1 DOWNCROSS = - 1 NONE = 0","title":"CrossFlag"},{"location":"api/talib/#omicron.talib.morph.breakout","text":"\u68c0\u6d4b\u65f6\u95f4\u5e8f\u5217\u662f\u5426\u7a81\u7834\u4e86\u538b\u529b\u7ebf\uff08\u6574\u7406\u7ebf\uff09 Parameters: Name Type Description Default ts np.ndarray \u65f6\u95f4\u5e8f\u5217 required upthres float \u8bf7\u53c2\u8003 peaks_and_valleys 0.01 downthres float \u8bf7\u53c2\u8003 peaks_and_valleys -0.01 confirm int \u7ecf\u8fc7\u591a\u5c11\u4e2abars\u540e\uff0c\u624d\u786e\u8ba4\u7a81\u7834\u3002\u9ed8\u8ba4\u4e3a1 1 Returns: Type Description BreakoutFlag \u5982\u679c\u4e0a\u5411\u7a81\u7834\u538b\u529b\u7ebf\uff0c\u8fd4\u56de1\uff0c\u5982\u679c\u5411\u4e0b\u7a81\u7834\u538b\u529b\u7ebf\uff0c\u8fd4\u56de-1\uff0c\u5426\u5219\u8fd4\u56de0 Source code in omicron/talib/morph.py def breakout ( ts : np . ndarray , upthres : float = 0.01 , downthres : float = - 0.01 , confirm : int = 1 ) -> BreakoutFlag : \"\"\"\u68c0\u6d4b\u65f6\u95f4\u5e8f\u5217\u662f\u5426\u7a81\u7834\u4e86\u538b\u529b\u7ebf\uff08\u6574\u7406\u7ebf\uff09 Args: ts (np.ndarray): \u65f6\u95f4\u5e8f\u5217 upthres (float, optional): \u8bf7\u53c2\u8003[peaks_and_valleys][omicron.talib.morph.peaks_and_valleys] downthres (float, optional): \u8bf7\u53c2\u8003[peaks_and_valleys][omicron.talib.morph.peaks_and_valleys] confirm (int, optional): \u7ecf\u8fc7\u591a\u5c11\u4e2abars\u540e\uff0c\u624d\u786e\u8ba4\u7a81\u7834\u3002\u9ed8\u8ba4\u4e3a1 Returns: \u5982\u679c\u4e0a\u5411\u7a81\u7834\u538b\u529b\u7ebf\uff0c\u8fd4\u56de1\uff0c\u5982\u679c\u5411\u4e0b\u7a81\u7834\u538b\u529b\u7ebf\uff0c\u8fd4\u56de-1\uff0c\u5426\u5219\u8fd4\u56de0 \"\"\" support , resist , _ = support_resist_lines ( ts [: - confirm ], upthres , downthres ) x0 = len ( ts ) - confirm - 1 x = list ( range ( len ( ts ) - confirm , len ( ts ))) if resist is not None : if np . all ( ts [ x ] > resist ( x )) and ts [ x0 ] <= resist ( x0 ): return BreakoutFlag . UP if support is not None : if np . all ( ts [ x ] < support ( x )) and ts [ x0 ] >= support ( x0 ): return BreakoutFlag . DOWN return BreakoutFlag . NONE","title":"breakout()"},{"location":"api/talib/#omicron.talib.morph.cross","text":"\u5224\u65ad\u5e8f\u5217f\u662f\u5426\u4e0eg\u76f8\u4ea4\u3002\u5982\u679c\u4e24\u4e2a\u5e8f\u5217\u6709\u4e14\u4ec5\u6709\u4e00\u4e2a\u4ea4\u70b9\uff0c\u5219\u8fd4\u56de1\u8868\u660ef\u4e0a\u4ea4g\uff1b-1\u8868\u660ef\u4e0b\u4ea4g \u672c\u65b9\u6cd5\u53ef\u7528\u4ee5\u5224\u65ad\u4e24\u6761\u5747\u7ebf\u662f\u5426\u76f8\u4ea4\u3002 Returns: Type Description CrossFlag (flag, index), \u5176\u4e2dflag\u53d6\u503c\u4e3a\uff1a 0 \u65e0\u6548 -1 f\u5411\u4e0b\u4ea4\u53c9g 1 f\u5411\u4e0a\u4ea4\u53c9g Source code in omicron/talib/morph.py def cross ( f : np . ndarray , g : np . 
ndarray ) -> CrossFlag : \"\"\"\u5224\u65ad\u5e8f\u5217f\u662f\u5426\u4e0eg\u76f8\u4ea4\u3002\u5982\u679c\u4e24\u4e2a\u5e8f\u5217\u6709\u4e14\u4ec5\u6709\u4e00\u4e2a\u4ea4\u70b9\uff0c\u5219\u8fd4\u56de1\u8868\u660ef\u4e0a\u4ea4g\uff1b-1\u8868\u660ef\u4e0b\u4ea4g \u672c\u65b9\u6cd5\u53ef\u7528\u4ee5\u5224\u65ad\u4e24\u6761\u5747\u7ebf\u662f\u5426\u76f8\u4ea4\u3002 returns: (flag, index), \u5176\u4e2dflag\u53d6\u503c\u4e3a\uff1a 0 \u65e0\u6548 -1 f\u5411\u4e0b\u4ea4\u53c9g 1 f\u5411\u4e0a\u4ea4\u53c9g \"\"\" indices = np . argwhere ( np . diff ( np . sign ( f - g ))) . flatten () if len ( indices ) == 0 : return CrossFlag . NONE , 0 # \u5982\u679c\u5b58\u5728\u4e00\u4e2a\u6216\u8005\u591a\u4e2a\u4ea4\u70b9\uff0c\u53d6\u6700\u540e\u4e00\u4e2a idx = indices [ - 1 ] if f [ idx ] < g [ idx ]: return CrossFlag . UPCROSS , idx elif f [ idx ] > g [ idx ]: return CrossFlag . DOWNCROSS , idx else : return CrossFlag ( np . sign ( g [ idx - 1 ] - f [ idx - 1 ])), idx","title":"cross()"},{"location":"api/talib/#omicron.talib.morph.energy_hump","text":"\u68c0\u6d4b bars \u4e2d\u662f\u5426\u5b58\u5728\u4e24\u6ce2\u4ee5\u4e0a\u91cf\u80fd\u5267\u70c8\u589e\u52a0\u7684\u60c5\u5f62\uff08\u80fd\u91cf\u9a7c\u5cf0\uff09\uff0c\u8fd4\u56de\u6700\u540e\u4e00\u6ce2\u8ddd\u73b0\u5728\u7684\u4f4d\u7f6e\u53ca\u533a\u95f4\u957f\u5ea6\u3002 \u6ce8\u610f\u5982\u679c\u6700\u540e\u4e00\u4e2a\u80fd\u91cf\u9a7c\u5cf0\u8ddd\u73b0\u5728\u8fc7\u8fdc\uff08\u6bd4\u5982\u8d85\u8fc710\u4e2abar),\u53ef\u80fd\u610f\u5473\u7740\u8d44\u91d1\u5df2\u7ecf\u9003\u79bb\uff0c\u80fd\u91cf\u5df2\u7ecf\u8017\u5c3d\u3002 Parameters: Name Type Description Default bars [('frame', ' Optional [ Tuple [ int , int ]]: \"\"\"\u68c0\u6d4b`bars`\u4e2d\u662f\u5426\u5b58\u5728\u4e24\u6ce2\u4ee5\u4e0a\u91cf\u80fd\u5267\u70c8\u589e\u52a0\u7684\u60c5\u5f62\uff08\u80fd\u91cf\u9a7c\u5cf0\uff09\uff0c\u8fd4\u56de\u6700\u540e\u4e00\u6ce2\u8ddd\u73b0\u5728\u7684\u4f4d\u7f6e\u53ca\u533a\u95f4\u957f\u5ea6\u3002 \u6ce8\u610f\u5982\u679c\u6700\u540e\u4e00\u4e2a\u80fd\u91cf\u9a7c\u5cf0\u8ddd\u73b0\u5728\u8fc7\u8fdc\uff08\u6bd4\u5982\u8d85\u8fc710\u4e2abar),\u53ef\u80fd\u610f\u5473\u7740\u8d44\u91d1\u5df2\u7ecf\u9003\u79bb\uff0c\u80fd\u91cf\u5df2\u7ecf\u8017\u5c3d\u3002 Args: bars: \u884c\u60c5\u6570\u636e thresh: \u6700\u540e\u4e00\u6ce2\u91cf\u5fc5\u987b\u5927\u4e8e20\u5929\u5747\u91cf\u7684\u500d\u6570\u3002 Returns: \u5982\u679c\u4e0d\u5b58\u5728\u80fd\u91cf\u9a7c\u5cf0\u7684\u60c5\u5f62\uff0c\u5219\u8fd4\u56deNone\uff0c\u5426\u5219\u8fd4\u56de\u6700\u540e\u4e00\u4e2a\u9a7c\u5cf0\u79bb\u73b0\u5728\u7684\u8ddd\u79bb\u53ca\u533a\u95f4\u957f\u5ea6\u3002 \"\"\" vol = bars [ \"volume\" ] std = np . std ( vol [ 1 :] / vol [: - 1 ]) pvs = peak_valley_pivots ( vol , std , 0 ) frames = bars [ \"frame\" ] pvs [ 0 ] = 0 pvs [ - 1 ] = - 1 peaks = np . argwhere ( pvs == 1 ) mn = np . mean ( vol [ peaks ]) # \u9876\u70b9\u4e0d\u80fd\u7f29\u91cf\u5230\u5c16\u5cf0\u5747\u503c\u4ee5\u4e0b real_peaks = np . intersect1d ( np . argwhere ( vol > mn ), peaks ) if len ( real_peaks ) < 2 : return None logger . debug ( \"found %s peaks at %s \" , len ( real_peaks ), frames [ real_peaks ]) lp = real_peaks [ - 1 ] ma = moving_average ( vol , 20 )[ lp ] if vol [ lp ] < ma * thresh : logger . 
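`cross`, shown above, is the building block for the classic golden-cross / dead-cross check between two moving averages. A sketch assuming `cross` and `CrossFlag` are importable from `omicron.talib.morph` and `moving_average` from `omicron.talib.core`:

```python
import numpy as np
from omicron.talib.core import moving_average      # path per the source listing
from omicron.talib.morph import CrossFlag, cross   # path per the source listing

np.random.seed(42)
close = np.cumprod(1 + np.random.randn(40) * 0.01) * 15.0

# compare the last 10 valid points of the 5- and 10-bar moving averages
ma5 = moving_average(close, 5)[-10:]
ma10 = moving_average(close, 10)[-10:]

flag, idx = cross(ma5, ma10)
if flag == CrossFlag.UPCROSS:
    print(f"golden cross {len(ma5) - 1 - idx} bars ago")
elif flag == CrossFlag.DOWNCROSS:
    print(f"dead cross {len(ma5) - 1 - idx} bars ago")
else:
    print("no crossing inside the window")
```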
debug ( \"vol of last peak[ %s ] is less than mean_vol(20) * thresh[ %s ]\" , vol [ lp ], ma * thresh , ) return None return len ( bars ) - real_peaks [ - 1 ], real_peaks [ - 1 ] - real_peaks [ 0 ]","title":"energy_hump()"},{"location":"api/talib/#omicron.talib.morph.inverse_vcross","text":"\u5224\u65ad\u5e8f\u5217f\u662f\u5426\u4e0e\u5e8f\u5217g\u5b58\u5728^\u578b\u76f8\u4ea4\u3002\u5373\u5b58\u5728\u4e24\u4e2a\u4ea4\u70b9\uff0c\u7b2c\u4e00\u4e2a\u4ea4\u70b9\u4e3a\u5411\u4e0a\u76f8\u4ea4\uff0c\u7b2c\u4e8c\u4e2a\u4ea4\u70b9\u4e3a\u5411\u4e0b \u76f8\u4ea4\u3002\u53ef\u7528\u4e8e\u5224\u65ad\u89c1\u9876\u7279\u5f81\u7b49\u573a\u5408\u3002 Parameters: Name Type Description Default f np.array [description] required g np.array [description] required Returns: Type Description Tuple [description] Source code in omicron/talib/morph.py def inverse_vcross ( f : np . array , g : np . array ) -> Tuple : \"\"\"\u5224\u65ad\u5e8f\u5217f\u662f\u5426\u4e0e\u5e8f\u5217g\u5b58\u5728^\u578b\u76f8\u4ea4\u3002\u5373\u5b58\u5728\u4e24\u4e2a\u4ea4\u70b9\uff0c\u7b2c\u4e00\u4e2a\u4ea4\u70b9\u4e3a\u5411\u4e0a\u76f8\u4ea4\uff0c\u7b2c\u4e8c\u4e2a\u4ea4\u70b9\u4e3a\u5411\u4e0b \u76f8\u4ea4\u3002\u53ef\u7528\u4e8e\u5224\u65ad\u89c1\u9876\u7279\u5f81\u7b49\u573a\u5408\u3002 Args: f (np.array): [description] g (np.array): [description] Returns: Tuple: [description] \"\"\" indices = np . argwhere ( np . diff ( np . sign ( f - g ))) . flatten () if len ( indices ) == 2 : idx0 , idx1 = indices if f [ idx0 ] < g [ idx0 ] and f [ idx1 ] > g [ idx1 ]: return True , ( idx0 , idx1 ) return False , ( None , None )","title":"inverse_vcross()"},{"location":"api/talib/#omicron.talib.morph.peaks_and_valleys","text":"\u5bfb\u627ets\u4e2d\u7684\u6ce2\u5cf0\u548c\u6ce2\u8c37\uff0c\u8fd4\u56de\u6570\u7ec4\u6307\u793a\u5728\u8be5\u4f4d\u7f6e\u4e0a\u662f\u5426\u4e3a\u6ce2\u5cf0\u6216\u6ce2\u8c37\u3002\u5982\u679c\u4e3a1\uff0c\u5219\u4e3a\u6ce2\u5cf0\uff1b\u5982\u679c\u4e3a-1\uff0c\u5219\u4e3a\u6ce2\u8c37\u3002 \u672c\u51fd\u6570\u76f4\u63a5\u4f7f\u7528\u4e86zigzag\u4e2d\u7684peak_valley_pivots. \u6709\u5f88\u591a\u65b9\u6cd5\u53ef\u4ee5\u5b9e\u73b0\u672c\u529f\u80fd\uff0c\u6bd4\u5982scipy.signals.find_peaks_cwt, peak_valley_pivots\u7b49\u3002\u672c\u51fd\u6570\u66f4\u9002\u5408\u91d1\u878d\u65f6\u95f4\u5e8f\u5217\uff0c\u5e76\u4e14\u4f7f\u7528\u4e86cython\u52a0\u901f\u3002 Parameters: Name Type Description Default ts np.ndarray \u65f6\u95f4\u5e8f\u5217 required up_thresh float \u6ce2\u5cf0\u7684\u9608\u503c\uff0c\u5982\u679c\u4e3aNone,\u5219\u4f7f\u7528ts\u53d8\u5316\u7387\u7684\u4e8c\u500d\u6807\u51c6\u5dee None down_thresh float \u6ce2\u8c37\u7684\u9608\u503c\uff0c\u5982\u679c\u4e3aNone,\u5219\u4f7f\u7528ts\u53d8\u5316\u7387\u7684\u4e8c\u500d\u6807\u51c6\u5dee\u4e58\u4ee5-1 None Returns: Type Description np.ndarray \u8fd4\u56de\u6570\u7ec4\u6307\u793a\u5728\u8be5\u4f4d\u7f6e\u4e0a\u662f\u5426\u4e3a\u6ce2\u5cf0\u6216\u6ce2\u8c37\u3002 Source code in omicron/talib/morph.py def peaks_and_valleys ( ts : np . ndarray , up_thresh : Optional [ float ] = None , down_thresh : Optional [ float ] = None , ) -> np . ndarray : \"\"\"\u5bfb\u627ets\u4e2d\u7684\u6ce2\u5cf0\u548c\u6ce2\u8c37\uff0c\u8fd4\u56de\u6570\u7ec4\u6307\u793a\u5728\u8be5\u4f4d\u7f6e\u4e0a\u662f\u5426\u4e3a\u6ce2\u5cf0\u6216\u6ce2\u8c37\u3002\u5982\u679c\u4e3a1\uff0c\u5219\u4e3a\u6ce2\u5cf0\uff1b\u5982\u679c\u4e3a-1\uff0c\u5219\u4e3a\u6ce2\u8c37\u3002 \u672c\u51fd\u6570\u76f4\u63a5\u4f7f\u7528\u4e86zigzag\u4e2d\u7684peak_valley_pivots. 
\u6709\u5f88\u591a\u65b9\u6cd5\u53ef\u4ee5\u5b9e\u73b0\u672c\u529f\u80fd\uff0c\u6bd4\u5982scipy.signals.find_peaks_cwt, peak_valley_pivots\u7b49\u3002\u672c\u51fd\u6570\u66f4\u9002\u5408\u91d1\u878d\u65f6\u95f4\u5e8f\u5217\uff0c\u5e76\u4e14\u4f7f\u7528\u4e86cython\u52a0\u901f\u3002 Args: ts (np.ndarray): \u65f6\u95f4\u5e8f\u5217 up_thresh (float): \u6ce2\u5cf0\u7684\u9608\u503c\uff0c\u5982\u679c\u4e3aNone,\u5219\u4f7f\u7528ts\u53d8\u5316\u7387\u7684\u4e8c\u500d\u6807\u51c6\u5dee down_thresh (float): \u6ce2\u8c37\u7684\u9608\u503c\uff0c\u5982\u679c\u4e3aNone,\u5219\u4f7f\u7528ts\u53d8\u5316\u7387\u7684\u4e8c\u500d\u6807\u51c6\u5dee\u4e58\u4ee5-1 Returns: np.ndarray: \u8fd4\u56de\u6570\u7ec4\u6307\u793a\u5728\u8be5\u4f4d\u7f6e\u4e0a\u662f\u5426\u4e3a\u6ce2\u5cf0\u6216\u6ce2\u8c37\u3002 \"\"\" if ts . dtype != np . float64 : ts = ts . astype ( np . float64 ) if any ([ up_thresh is None , down_thresh is None ]): change_rate = ts [ 1 :] / ts [: - 1 ] - 1 std = np . std ( change_rate ) up_thresh = up_thresh or 2 * std down_thresh = down_thresh or - 2 * std return peak_valley_pivots ( ts , up_thresh , down_thresh )","title":"peaks_and_valleys()"},{"location":"api/talib/#omicron.talib.morph.plateaus","text":"\u7edf\u8ba1\u6570\u7ec4 numbers \u4e2d\u7684\u53ef\u80fd\u5b58\u5728\u7684\u5e73\u53f0\u6574\u7406\u3002 \u5982\u679c\u4e00\u4e2a\u6570\u7ec4\u4e2d\u5b58\u5728\u7740\u5b50\u6570\u7ec4\uff0c\u4f7f\u5f97\u5176\u5143\u7d20\u4e0e\u5747\u503c\u7684\u8ddd\u79bb\u843d\u5728\u4e09\u4e2a\u6807\u51c6\u5dee\u4ee5\u5185\u7684\u6bd4\u4f8b\u8d85\u8fc7 fall_in_range_ratio \u7684\uff0c\u5219\u8ba4\u4e3a\u8be5\u5b50\u6570\u7ec4\u6ee1\u8db3\u5e73\u53f0\u6574\u7406\u3002 Parameters: Name Type Description Default numbers ndarray \u8f93\u5165\u6570\u7ec4 required min_size int \u5e73\u53f0\u7684\u6700\u5c0f\u957f\u5ea6 required fall_in_range_ratio float \u8d85\u8fc7 fall_in_range_ratio \u6bd4\u4f8b\u7684\u5143\u7d20\u843d\u5728\u5747\u503c\u7684\u4e09\u4e2a\u6807\u51c6\u5dee\u4ee5\u5185\uff0c\u5c31\u8ba4\u4e3a\u8be5\u5b50\u6570\u7ec4\u6784\u6210\u4e00\u4e2a\u5e73\u53f0 0.97 Returns: Type Description List[Tuple] \u5e73\u53f0\u7684\u8d77\u59cb\u4f4d\u7f6e\u548c\u957f\u5ea6\u7684\u6570\u7ec4 Source code in omicron/talib/morph.py def plateaus ( numbers : np . ndarray , min_size : int , fall_in_range_ratio : float = 0.97 ) -> List [ Tuple ]: \"\"\"\u7edf\u8ba1\u6570\u7ec4`numbers`\u4e2d\u7684\u53ef\u80fd\u5b58\u5728\u7684\u5e73\u53f0\u6574\u7406\u3002 \u5982\u679c\u4e00\u4e2a\u6570\u7ec4\u4e2d\u5b58\u5728\u7740\u5b50\u6570\u7ec4\uff0c\u4f7f\u5f97\u5176\u5143\u7d20\u4e0e\u5747\u503c\u7684\u8ddd\u79bb\u843d\u5728\u4e09\u4e2a\u6807\u51c6\u5dee\u4ee5\u5185\u7684\u6bd4\u4f8b\u8d85\u8fc7`fall_in_range_ratio`\u7684\uff0c\u5219\u8ba4\u4e3a\u8be5\u5b50\u6570\u7ec4\u6ee1\u8db3\u5e73\u53f0\u6574\u7406\u3002 Args: numbers: \u8f93\u5165\u6570\u7ec4 min_size: \u5e73\u53f0\u7684\u6700\u5c0f\u957f\u5ea6 fall_in_range_ratio: \u8d85\u8fc7`fall_in_range_ratio`\u6bd4\u4f8b\u7684\u5143\u7d20\u843d\u5728\u5747\u503c\u7684\u4e09\u4e2a\u6807\u51c6\u5dee\u4ee5\u5185\uff0c\u5c31\u8ba4\u4e3a\u8be5\u5b50\u6570\u7ec4\u6784\u6210\u4e00\u4e2a\u5e73\u53f0 Returns: \u5e73\u53f0\u7684\u8d77\u59cb\u4f4d\u7f6e\u548c\u957f\u5ea6\u7684\u6570\u7ec4 \"\"\" if numbers . size <= min_size : n = 1 else : n = numbers . size // min_size clusters = clustering ( numbers , n ) plats = [] for ( start , length ) in clusters : if length < min_size : continue y = numbers [ start : start + length ] mean = np . mean ( y ) std = np . std ( y ) inrange = len ( y [ np . 
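`peaks_and_valleys` returns a +1/-1/0 flag array aligned with the input, so peak and valley positions fall out of a simple `argwhere`. A minimal sketch, assuming the `omicron.talib.morph` import path and that the zigzag dependency is installed:

```python
import numpy as np
from omicron.talib.morph import peaks_and_valleys  # path per the source listing above

np.random.seed(78)
close = np.cumprod(1 + np.random.randn(120) * 0.02) * 30.0

# None thresholds -> +/- 2 * std of bar-to-bar returns, as documented
flags = peaks_and_valleys(close, None, None)

peak_pos = np.argwhere(flags == 1).flatten()
valley_pos = np.argwhere(flags == -1).flatten()
print("peaks at:", peak_pos)
print("valleys at:", valley_pos)
```

The same flag array is what `support_resist_lines` and `plateaus` above build on.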
abs ( y - mean ) < 3 * std ]) ratio = inrange / length if ratio >= fall_in_range_ratio : plats . append (( start , length )) return plats","title":"plateaus()"},{"location":"api/talib/#omicron.talib.morph.rsi_bottom_distance","text":"\u6839\u636e\u7ed9\u5b9a\u7684\u6536\u76d8\u4ef7\uff0c\u8ba1\u7b97\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5230\u4e0a\u4e00\u4e2a\u53d1\u51farsi\u4f4e\u6c34\u5e73\u7684\u8ddd\u79bb\uff0c \u5982\u679c\u4ece\u4e0a\u4e00\u4e2a\u6700\u4f4e\u70b9rsi\u5230\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5e76\u672a\u53d1\u51fa\u4f4e\u6c34\u5e73\u4fe1\u53f7\uff0c \u8fd4\u56de\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5230\u4e0a\u4e00\u4e2a\u53d1\u51fa\u6700\u4f4e\u70b9rsi\u7684\u8ddd\u79bb\u3002 \u5176\u4e2dclose\u7684\u957f\u5ea6\u4e00\u822c\u4e0d\u5c0f\u4e8e60\u3002 \u8fd4\u56de\u503c\u4e3a\u8ddd\u79bb\u6574\u6570\uff0c\u4e0d\u6ee1\u8db3\u6761\u4ef6\u5219\u8fd4\u56deNone\u3002 Parameters: Name Type Description Default close np.array \u5177\u6709\u65f6\u95f4\u5e8f\u5217\u7684\u6536\u76d8\u4ef7 required thresh Tuple[float, float]) None\u9002\u7528\u6240\u6709\u80a1\u7968\uff0c\u4e0d\u5fc5\u66f4\u6539\uff0c\u4e5f\u53ef\u81ea\u884c\u8bbe\u7f6e\u3002 None Returns: Type Description int \u8fd4\u56de\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5230\u4e0a\u4e00\u4e2a\u53d1\u51farsi\u4f4e\u6c34\u5e73\u7684\u8ddd\u79bb\u3002 \u5982\u679c\u4ece\u4e0a\u4e00\u4e2a\u6700\u4f4e\u70b9rsi\u5230\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5e76\u672a\u53d1\u51fa\u4f4e\u6c34\u5e73\u4fe1\u53f7\uff0c \u8fd4\u56de\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5230\u4e0a\u4e00\u4e2a\u53d1\u51fa\u6700\u4f4e\u70b9rsi\u7684\u8ddd\u79bb\u3002 \u9664\u6b64\u4e4b\u5916\uff0c\u8fd4\u56deNone\u3002 Source code in omicron/talib/morph.py def rsi_bottom_distance ( close : np . array , thresh : Tuple [ float , float ] = None ) -> int : \"\"\"\u6839\u636e\u7ed9\u5b9a\u7684\u6536\u76d8\u4ef7\uff0c\u8ba1\u7b97\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5230\u4e0a\u4e00\u4e2a\u53d1\u51farsi\u4f4e\u6c34\u5e73\u7684\u8ddd\u79bb\uff0c \u5982\u679c\u4ece\u4e0a\u4e00\u4e2a\u6700\u4f4e\u70b9rsi\u5230\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5e76\u672a\u53d1\u51fa\u4f4e\u6c34\u5e73\u4fe1\u53f7\uff0c \u8fd4\u56de\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5230\u4e0a\u4e00\u4e2a\u53d1\u51fa\u6700\u4f4e\u70b9rsi\u7684\u8ddd\u79bb\u3002 \u5176\u4e2dclose\u7684\u957f\u5ea6\u4e00\u822c\u4e0d\u5c0f\u4e8e60\u3002 \u8fd4\u56de\u503c\u4e3a\u8ddd\u79bb\u6574\u6570\uff0c\u4e0d\u6ee1\u8db3\u6761\u4ef6\u5219\u8fd4\u56deNone\u3002 Args: close (np.array): \u5177\u6709\u65f6\u95f4\u5e8f\u5217\u7684\u6536\u76d8\u4ef7 thresh (Tuple[float, float]) : None\u9002\u7528\u6240\u6709\u80a1\u7968\uff0c\u4e0d\u5fc5\u66f4\u6539\uff0c\u4e5f\u53ef\u81ea\u884c\u8bbe\u7f6e\u3002 Returns: \u8fd4\u56de\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5230\u4e0a\u4e00\u4e2a\u53d1\u51farsi\u4f4e\u6c34\u5e73\u7684\u8ddd\u79bb\u3002 \u5982\u679c\u4ece\u4e0a\u4e00\u4e2a\u6700\u4f4e\u70b9rsi\u5230\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5e76\u672a\u53d1\u51fa\u4f4e\u6c34\u5e73\u4fe1\u53f7\uff0c \u8fd4\u56de\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5230\u4e0a\u4e00\u4e2a\u53d1\u51fa\u6700\u4f4e\u70b9rsi\u7684\u8ddd\u79bb\u3002 \u9664\u6b64\u4e4b\u5916\uff0c\u8fd4\u56deNone\u3002\"\"\" assert len ( close ) >= 60 , \"must provide an array with at least 60 length!\" if close . dtype != np . float64 : close = close . astype ( np . float64 ) if thresh is None : std = np . std ( close [ - 59 :] / close [ - 60 : - 1 ] - 1 ) thresh = ( 2 * std , - 2 * std ) rsi = ta . 
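A minimal sketch for plateaus, using a hand-built series with a low-noise flat segment between two trends. The exact (start, length) pairs depend on the internal clustering step, so the result is printed rather than asserted.

```python
import numpy as np
from omicron.talib.morph import plateaus

np.random.seed(78)
y = np.concatenate([
    np.linspace(1.0, 2.0, 30),            # up trend
    2.0 + np.random.randn(40) * 0.005,    # low-noise flat segment
    np.linspace(2.0, 1.5, 30),            # down trend
])

for start, length in plateaus(y, min_size=20):
    print(f"plateau starts at {start}, length {length}")
```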
RSI ( close , 6 ) watermarks = rsi_watermarks ( close , thresh ) if watermarks is not None : low_watermark , _ , _ = watermarks pivots = peak_valley_pivots ( close , thresh [ 0 ], thresh [ 1 ]) pivots [ 0 ], pivots [ - 1 ] = 0 , 0 # \u8c37\u503cRSI<30 valley_rsi_index = np . where (( rsi < 30 ) & ( pivots == - 1 ))[ 0 ] # RSI\u4f4e\u6c34\u5e73\u7684\u6700\u5927\u503c\uff1a\u4f4e\u6c34\u5e73*1.01 low_rsi_index = np . where ( rsi <= low_watermark * 1.01 )[ 0 ] if len ( valley_rsi_index ) > 0 : distance = len ( rsi ) - 1 - valley_rsi_index [ - 1 ] if len ( low_rsi_index ) > 0 : if low_rsi_index [ - 1 ] >= valley_rsi_index [ - 1 ]: distance = len ( rsi ) - 1 - low_rsi_index [ - 1 ] return distance","title":"rsi_bottom_distance()"},{"location":"api/talib/#omicron.talib.morph.rsi_bottom_divergent","text":"\u5bfb\u627e\u6700\u8fd1\u6ee1\u8db3\u6761\u4ef6\u7684rsi\u5e95\u80cc\u79bb\u3002 \u8fd4\u56de\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5230\u6700\u8fd1\u5e95\u80cc\u79bb\u53d1\u751f\u70b9\u7684\u8ddd\u79bb\uff1b\u6ca1\u6709\u6ee1\u8db3\u6761\u4ef6\u7684\u5e95\u80cc\u79bb\uff0c\u8fd4\u56deNone\u3002 Parameters: Name Type Description Default close np.array \u65f6\u95f4\u5e8f\u5217\u6536\u76d8\u4ef7 required thresh Tuple[float, float] \u8bf7\u53c2\u8003 peaks_and_valleys None rsi_limit float RSI\u53d1\u751f\u5e95\u80cc\u79bb\u65f6\u7684\u9608\u503c, \u9ed8\u8ba4\u503c30\uff0820\u6548\u679c\u66f4\u4f73\uff0c\u4f46\u662f\u68c0\u6d4b\u51fa\u6765\u6570\u91cf\u592a\u5c11\uff09\uff0c\u5373\u53ea\u8fc7\u6ee4RSI6<30\u7684\u5c40\u90e8\u6700\u4f4e\u6536\u76d8\u4ef7\u3002 30 Returns: Type Description int \u8fd4\u56deint\u7c7b\u578b\u7684\u6574\u6570\uff0c\u8868\u793a\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5230\u6700\u8fd1\u5e95\u80cc\u79bb\u53d1\u751f\u70b9\u7684\u8ddd\u79bb\uff1b\u6ca1\u6709\u6ee1\u8db3\u6761\u4ef6\u7684\u5e95\u80cc\u79bb\uff0c\u8fd4\u56deNone\u3002 Source code in omicron/talib/morph.py def rsi_bottom_divergent ( close : np . array , thresh : Tuple [ float , float ] = None , rsi_limit : float = 30 ) -> int : \"\"\"\u5bfb\u627e\u6700\u8fd1\u6ee1\u8db3\u6761\u4ef6\u7684rsi\u5e95\u80cc\u79bb\u3002 \u8fd4\u56de\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5230\u6700\u8fd1\u5e95\u80cc\u79bb\u53d1\u751f\u70b9\u7684\u8ddd\u79bb\uff1b\u6ca1\u6709\u6ee1\u8db3\u6761\u4ef6\u7684\u5e95\u80cc\u79bb\uff0c\u8fd4\u56deNone\u3002 Args: close (np.array): \u65f6\u95f4\u5e8f\u5217\u6536\u76d8\u4ef7 thresh (Tuple[float, float]): \u8bf7\u53c2\u8003[peaks_and_valleys][omicron.talib.morph.peaks_and_valleys] rsi_limit (float, optional): RSI\u53d1\u751f\u5e95\u80cc\u79bb\u65f6\u7684\u9608\u503c, \u9ed8\u8ba4\u503c30\uff0820\u6548\u679c\u66f4\u4f73\uff0c\u4f46\u662f\u68c0\u6d4b\u51fa\u6765\u6570\u91cf\u592a\u5c11\uff09\uff0c\u5373\u53ea\u8fc7\u6ee4RSI6<30\u7684\u5c40\u90e8\u6700\u4f4e\u6536\u76d8\u4ef7\u3002 Returns: \u8fd4\u56deint\u7c7b\u578b\u7684\u6574\u6570\uff0c\u8868\u793a\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5230\u6700\u8fd1\u5e95\u80cc\u79bb\u53d1\u751f\u70b9\u7684\u8ddd\u79bb\uff1b\u6ca1\u6709\u6ee1\u8db3\u6761\u4ef6\u7684\u5e95\u80cc\u79bb\uff0c\u8fd4\u56deNone\u3002 \"\"\" assert len ( close ) >= 60 , \"must provide an array with at least 60 length!\" if close . dtype != np . float64 : close = close . astype ( np . float64 ) rsi = ta . RSI ( close , 6 ) if thresh is None : std = np . std ( close [ - 59 :] / close [ - 60 : - 1 ] - 1 ) thresh = ( 2 * std , - 2 * std ) pivots = peak_valley_pivots ( close , thresh [ 0 ], thresh [ 1 ]) pivots [ 0 ], pivots [ - 1 ] = 0 , 0 length = len ( close ) valley_index = np . 
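A minimal sketch for rsi_bottom_distance on a hand-built V-shaped series: 80 closes satisfy the length assertion, and the steady decline pushes RSI6 well below 30 at the low pivot. The concrete distance is not asserted because it depends on where the pivot lands.

```python
import numpy as np
from omicron.talib.morph import rsi_bottom_distance

close = np.concatenate([
    np.linspace(10.0, 7.0, 40),    # steady decline drives RSI6 well below 30
    np.linspace(7.0, 8.5, 40),     # rebound after the low
]).astype(np.float64)

distance = rsi_bottom_distance(close)   # thresh=None -> ±2 std of recent returns
if distance is not None:
    print(f"last RSI low-watermark signal was {distance} bars ago")
```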
where (( pivots == - 1 ) & ( rsi <= rsi_limit ))[ 0 ] if len ( valley_index ) >= 2 : if ( close [ valley_index [ - 1 ]] < close [ valley_index [ - 2 ]]) and ( rsi [ valley_index [ - 1 ]] > rsi [ valley_index [ - 2 ]] ): bottom_dev_distance = length - 1 - valley_index [ - 1 ] return bottom_dev_distance","title":"rsi_bottom_divergent()"},{"location":"api/talib/#omicron.talib.morph.rsi_predict_price","text":"\u7ed9\u5b9a\u4e00\u6bb5\u884c\u60c5\uff0c\u6839\u636e\u6700\u8fd1\u7684\u4e24\u4e2aRSI\u7684\u6781\u5c0f\u503c\u548c\u6781\u5927\u503c\u9884\u6d4b\u4e0b\u4e00\u4e2a\u5468\u671f\u53ef\u80fd\u8fbe\u5230\u7684\u6700\u4f4e\u4ef7\u683c\u548c\u6700\u9ad8\u4ef7\u683c\u3002 \u5176\u539f\u7406\u662f\uff0c\u4ee5\u9884\u6d4b\u6700\u8fd1\u7684\u4e24\u4e2a\u6700\u9ad8\u4ef7\u548c\u6700\u4f4e\u4ef7\uff0c\u6c42\u51fa\u5176\u76f8\u5bf9\u5e94\u7684RSI\u503c\uff0c\u6c42\u51fa\u6700\u9ad8\u4ef7\u548c\u6700\u4f4e\u4ef7RSI\u7684\u5747\u503c\uff0c \u82e5\u53ea\u6709\u4e00\u4e2a\u5219\u53d6\u6700\u8fd1\u7684\u4e00\u4e2a\u3002\u518d\u7531RSI\u516c\u5f0f\uff0c\u53cd\u63a8\u4ef7\u683c\u3002\u6b64\u65f6\u8fd4\u56de\u503c\u4e3a(None, float)\uff0c\u5373\u53ea\u6709\u6700\u9ad8\u4ef7\uff0c\u6ca1\u6709\u6700\u4f4e\u4ef7\u3002\u53cd\u4e4b\u4ea6\u7136\u3002 Parameters: Name Type Description Default close np.ndarray \u5177\u6709\u65f6\u95f4\u5e8f\u5217\u7684\u6536\u76d8\u4ef7 required thresh Tuple[float, float]) \u8bf7\u53c2\u8003 peaks_and_valleys None Returns: Type Description Tuple[float, float] \u8fd4\u56de\u6570\u7ec4[predicted_low_price, predicted_high_price], \u6570\u7ec4\u7b2c\u4e00\u4e2a\u503c\u4e3a\u5229\u7528\u8fbe\u5230\u4e4b\u524d\u6700\u4f4e\u6536\u76d8\u4ef7\u7684RSI\u9884\u6d4b\u7684\u6700\u4f4e\u4ef7\u3002 \u7b2c\u4e8c\u4e2a\u503c\u4e3a\u5229\u7528\u8fbe\u5230\u4e4b\u524d\u6700\u9ad8\u6536\u76d8\u4ef7\u7684RSI\u9884\u6d4b\u7684\u6700\u9ad8\u4ef7\u3002 Source code in omicron/talib/morph.py def rsi_predict_price ( close : np . 
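A minimal sketch for rsi_bottom_divergent: feed it at least 60 float64 closes (here the random-walk series from the peaks_and_valleys sketch above) and treat None as "no qualifying divergence"; a non-None value is the bar distance to the most recent bottom divergence.

```python
import numpy as np
from omicron.talib.morph import rsi_bottom_divergent

np.random.seed(1978)
close = np.cumprod(1 + np.random.randn(100) * 0.01).astype(np.float64)

distance = rsi_bottom_divergent(close)          # default rsi_limit=30
if distance is None:
    print("no recent bottom divergence")
else:
    print(f"bottom divergence occurred {distance} bars ago")
```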
ndarray , thresh : Tuple [ float , float ] = None ) -> Tuple [ float , float ]: \"\"\"\u7ed9\u5b9a\u4e00\u6bb5\u884c\u60c5\uff0c\u6839\u636e\u6700\u8fd1\u7684\u4e24\u4e2aRSI\u7684\u6781\u5c0f\u503c\u548c\u6781\u5927\u503c\u9884\u6d4b\u4e0b\u4e00\u4e2a\u5468\u671f\u53ef\u80fd\u8fbe\u5230\u7684\u6700\u4f4e\u4ef7\u683c\u548c\u6700\u9ad8\u4ef7\u683c\u3002 \u5176\u539f\u7406\u662f\uff0c\u4ee5\u9884\u6d4b\u6700\u8fd1\u7684\u4e24\u4e2a\u6700\u9ad8\u4ef7\u548c\u6700\u4f4e\u4ef7\uff0c\u6c42\u51fa\u5176\u76f8\u5bf9\u5e94\u7684RSI\u503c\uff0c\u6c42\u51fa\u6700\u9ad8\u4ef7\u548c\u6700\u4f4e\u4ef7RSI\u7684\u5747\u503c\uff0c \u82e5\u53ea\u6709\u4e00\u4e2a\u5219\u53d6\u6700\u8fd1\u7684\u4e00\u4e2a\u3002\u518d\u7531RSI\u516c\u5f0f\uff0c\u53cd\u63a8\u4ef7\u683c\u3002\u6b64\u65f6\u8fd4\u56de\u503c\u4e3a(None, float)\uff0c\u5373\u53ea\u6709\u6700\u9ad8\u4ef7\uff0c\u6ca1\u6709\u6700\u4f4e\u4ef7\u3002\u53cd\u4e4b\u4ea6\u7136\u3002 Args: close (np.ndarray): \u5177\u6709\u65f6\u95f4\u5e8f\u5217\u7684\u6536\u76d8\u4ef7 thresh (Tuple[float, float]) : \u8bf7\u53c2\u8003[peaks_and_valleys][omicron.talib.morph.peaks_and_valleys] Returns: \u8fd4\u56de\u6570\u7ec4[predicted_low_price, predicted_high_price], \u6570\u7ec4\u7b2c\u4e00\u4e2a\u503c\u4e3a\u5229\u7528\u8fbe\u5230\u4e4b\u524d\u6700\u4f4e\u6536\u76d8\u4ef7\u7684RSI\u9884\u6d4b\u7684\u6700\u4f4e\u4ef7\u3002 \u7b2c\u4e8c\u4e2a\u503c\u4e3a\u5229\u7528\u8fbe\u5230\u4e4b\u524d\u6700\u9ad8\u6536\u76d8\u4ef7\u7684RSI\u9884\u6d4b\u7684\u6700\u9ad8\u4ef7\u3002 \"\"\" assert len ( close ) >= 60 , \"must provide an array with at least 60 length!\" if thresh is None : std = np . std ( close [ - 59 :] / close [ - 60 : - 1 ] - 1 ) thresh = ( 2 * std , - 2 * std ) if close . dtype != np . float64 : close = close . astype ( np . float64 ) valley_rsi , peak_rsi , _ = rsi_watermarks ( close , thresh = thresh ) pivot = peak_valley_pivots ( close , thresh [ 0 ], thresh [ 1 ]) pivot [ 0 ], pivot [ - 1 ] = 0 , 0 # \u6390\u5934\u53bb\u5c3e price_change = pd . Series ( close ) . diff ( 1 ) . values ave_price_change = ( abs ( price_change )[ - 6 :] . mean ()) * 5 ave_price_raise = ( np . maximum ( price_change , 0 )[ - 6 :] . 
mean ()) * 5 if valley_rsi is not None : predicted_low_change = ( ave_price_change ) - ave_price_raise / ( 0.01 * valley_rsi ) if predicted_low_change > 0 : predicted_low_change = 0 predicted_low_price = close [ - 1 ] + predicted_low_change else : predicted_low_price = None if peak_rsi is not None : predicted_high_change = ( ave_price_raise - ave_price_change ) / ( 0.01 * peak_rsi - 1 ) - ave_price_change if predicted_high_change < 0 : predicted_high_change = 0 predicted_high_price = close [ - 1 ] + predicted_high_change else : predicted_high_price = None return predicted_low_price , predicted_high_price","title":"rsi_predict_price()"},{"location":"api/talib/#omicron.talib.morph.rsi_top_distance","text":"\u6839\u636e\u7ed9\u5b9a\u7684\u6536\u76d8\u4ef7\uff0c\u8ba1\u7b97\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5230\u4e0a\u4e00\u4e2a\u53d1\u51farsi\u9ad8\u6c34\u5e73\u7684\u8ddd\u79bb\uff0c \u5982\u679c\u4ece\u4e0a\u4e00\u4e2a\u6700\u9ad8\u70b9rsi\u5230\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5e76\u672a\u53d1\u51fa\u9ad8\u6c34\u5e73\u4fe1\u53f7\uff0c \u8fd4\u56de\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5230\u4e0a\u4e00\u4e2a\u53d1\u51fa\u6700\u9ad8\u70b9rsi\u7684\u8ddd\u79bb\u3002 \u5176\u4e2dclose\u7684\u957f\u5ea6\u4e00\u822c\u4e0d\u5c0f\u4e8e60\u3002 \u8fd4\u56de\u503c\u4e3a\u8ddd\u79bb\u6574\u6570\uff0c\u4e0d\u6ee1\u8db3\u6761\u4ef6\u5219\u8fd4\u56deNone\u3002 Parameters: Name Type Description Default close np.array \u5177\u6709\u65f6\u95f4\u5e8f\u5217\u7684\u6536\u76d8\u4ef7 required thresh Tuple[float, float]) None\u9002\u7528\u6240\u6709\u80a1\u7968\uff0c\u4e0d\u5fc5\u66f4\u6539\uff0c\u4e5f\u53ef\u81ea\u884c\u8bbe\u7f6e\u3002 None Returns: Type Description int \u8fd4\u56de\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5230\u4e0a\u4e00\u4e2a\u53d1\u51farsi\u9ad8\u6c34\u5e73\u7684\u8ddd\u79bb\u3002 \u5982\u679c\u4ece\u4e0a\u4e00\u4e2a\u6700\u9ad8\u70b9rsi\u5230\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5e76\u672a\u53d1\u51fa\u9ad8\u6c34\u5e73\u4fe1\u53f7\uff0c \u8fd4\u56de\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5230\u4e0a\u4e00\u4e2a\u53d1\u51fa\u6700\u9ad8\u70b9rsi\u7684\u8ddd\u79bb\u3002 \u9664\u6b64\u4e4b\u5916\uff0c\u8fd4\u56deNone\u3002 Source code in omicron/talib/morph.py def rsi_top_distance ( close : np . 
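A minimal sketch for rsi_predict_price on the same kind of 60+ bar float64 series. Either element of the returned pair can be None when no suitable RSI valley or peak was found in the window, so both are checked before use.

```python
import numpy as np
from omicron.talib.morph import rsi_predict_price

np.random.seed(1978)
close = np.cumprod(1 + np.random.randn(100) * 0.01).astype(np.float64)

low, high = rsi_predict_price(close)
if low is not None:
    print(f"projected low for the next cycle: {low:.3f}")
if high is not None:
    print(f"projected high for the next cycle: {high:.3f}")
```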
array , thresh : Tuple [ float , float ] = None ) -> int : \"\"\"\u6839\u636e\u7ed9\u5b9a\u7684\u6536\u76d8\u4ef7\uff0c\u8ba1\u7b97\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5230\u4e0a\u4e00\u4e2a\u53d1\u51farsi\u9ad8\u6c34\u5e73\u7684\u8ddd\u79bb\uff0c \u5982\u679c\u4ece\u4e0a\u4e00\u4e2a\u6700\u9ad8\u70b9rsi\u5230\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5e76\u672a\u53d1\u51fa\u9ad8\u6c34\u5e73\u4fe1\u53f7\uff0c \u8fd4\u56de\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5230\u4e0a\u4e00\u4e2a\u53d1\u51fa\u6700\u9ad8\u70b9rsi\u7684\u8ddd\u79bb\u3002 \u5176\u4e2dclose\u7684\u957f\u5ea6\u4e00\u822c\u4e0d\u5c0f\u4e8e60\u3002 \u8fd4\u56de\u503c\u4e3a\u8ddd\u79bb\u6574\u6570\uff0c\u4e0d\u6ee1\u8db3\u6761\u4ef6\u5219\u8fd4\u56deNone\u3002 Args: close (np.array): \u5177\u6709\u65f6\u95f4\u5e8f\u5217\u7684\u6536\u76d8\u4ef7 thresh (Tuple[float, float]) : None\u9002\u7528\u6240\u6709\u80a1\u7968\uff0c\u4e0d\u5fc5\u66f4\u6539\uff0c\u4e5f\u53ef\u81ea\u884c\u8bbe\u7f6e\u3002 Returns: \u8fd4\u56de\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5230\u4e0a\u4e00\u4e2a\u53d1\u51farsi\u9ad8\u6c34\u5e73\u7684\u8ddd\u79bb\u3002 \u5982\u679c\u4ece\u4e0a\u4e00\u4e2a\u6700\u9ad8\u70b9rsi\u5230\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5e76\u672a\u53d1\u51fa\u9ad8\u6c34\u5e73\u4fe1\u53f7\uff0c \u8fd4\u56de\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5230\u4e0a\u4e00\u4e2a\u53d1\u51fa\u6700\u9ad8\u70b9rsi\u7684\u8ddd\u79bb\u3002 \u9664\u6b64\u4e4b\u5916\uff0c\u8fd4\u56deNone\u3002\"\"\" assert len ( close ) >= 60 , \"must provide an array with at least 60 length!\" if close . dtype != np . float64 : close = close . astype ( np . float64 ) if thresh is None : std = np . std ( close [ - 59 :] / close [ - 60 : - 1 ] - 1 ) thresh = ( 2 * std , - 2 * std ) rsi = ta . RSI ( close , 6 ) watermarks = rsi_watermarks ( close , thresh ) if watermarks is not None : _ , high_watermark , _ = watermarks pivots = peak_valley_pivots ( close , thresh [ 0 ], thresh [ 1 ]) pivots [ 0 ], pivots [ - 1 ] = 0 , 0 # \u5cf0\u503cRSI>70 peak_rsi_index = np . where (( rsi > 70 ) & ( pivots == 1 ))[ 0 ] # RSI\u9ad8\u6c34\u5e73\u7684\u6700\u5c0f\u503c\uff1a\u9ad8\u6c34\u5e73*0.99 high_rsi_index = np . 
where ( rsi >= high_watermark * 0.99 )[ 0 ] if len ( peak_rsi_index ) > 0 : distance = len ( rsi ) - 1 - peak_rsi_index [ - 1 ] if len ( high_rsi_index ) > 0 : if high_rsi_index [ - 1 ] >= peak_rsi_index [ - 1 ]: distance = len ( rsi ) - 1 - high_rsi_index [ - 1 ] return distance","title":"rsi_top_distance()"},{"location":"api/talib/#omicron.talib.morph.rsi_top_divergent","text":"\u5bfb\u627e\u6700\u8fd1\u6ee1\u8db3\u6761\u4ef6\u7684rsi\u9876\u80cc\u79bb\u3002 \u8fd4\u56de\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5230\u6700\u8fd1\u9876\u80cc\u79bb\u53d1\u751f\u70b9\u7684\u8ddd\u79bb\uff1b\u6ca1\u6709\u6ee1\u8db3\u6761\u4ef6\u7684\u9876\u80cc\u79bb\uff0c\u8fd4\u56deNone\u3002 Parameters: Name Type Description Default close np.array \u65f6\u95f4\u5e8f\u5217\u6536\u76d8\u4ef7 required thresh Tuple[float, float] \u8bf7\u53c2\u8003 peaks_and_valleys None rsi_limit float RSI\u53d1\u751f\u9876\u80cc\u79bb\u65f6\u7684\u9608\u503c, \u9ed8\u8ba4\u503c70\uff0880\u6548\u679c\u66f4\u4f73\uff0c\u4f46\u662f\u68c0\u6d4b\u51fa\u6765\u6570\u91cf\u592a\u5c11\uff09\uff0c\u5373\u53ea\u8fc7\u6ee4RSI6>70\u7684\u5c40\u90e8\u6700\u9ad8\u6536\u76d8\u4ef7\u3002 70 Returns: Type Description Tuple[int, int] \u8fd4\u56deint\u7c7b\u578b\u7684\u6574\u6570\uff0c\u8868\u793a\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5230\u6700\u8fd1\u9876\u80cc\u79bb\u53d1\u751f\u70b9\u7684\u8ddd\u79bb\uff1b\u6ca1\u6709\u6ee1\u8db3\u6761\u4ef6\u7684\u9876\u80cc\u79bb\uff0c\u8fd4\u56deNone\u3002 Source code in omicron/talib/morph.py def rsi_top_divergent ( close : np . array , thresh : Tuple [ float , float ] = None , rsi_limit : float = 70 ) -> Tuple [ int , int ]: \"\"\"\u5bfb\u627e\u6700\u8fd1\u6ee1\u8db3\u6761\u4ef6\u7684rsi\u9876\u80cc\u79bb\u3002 \u8fd4\u56de\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5230\u6700\u8fd1\u9876\u80cc\u79bb\u53d1\u751f\u70b9\u7684\u8ddd\u79bb\uff1b\u6ca1\u6709\u6ee1\u8db3\u6761\u4ef6\u7684\u9876\u80cc\u79bb\uff0c\u8fd4\u56deNone\u3002 Args: close (np.array): \u65f6\u95f4\u5e8f\u5217\u6536\u76d8\u4ef7 thresh (Tuple[float, float]): \u8bf7\u53c2\u8003[peaks_and_valleys][omicron.talib.morph.peaks_and_valleys] rsi_limit (float, optional): RSI\u53d1\u751f\u9876\u80cc\u79bb\u65f6\u7684\u9608\u503c, \u9ed8\u8ba4\u503c70\uff0880\u6548\u679c\u66f4\u4f73\uff0c\u4f46\u662f\u68c0\u6d4b\u51fa\u6765\u6570\u91cf\u592a\u5c11\uff09\uff0c\u5373\u53ea\u8fc7\u6ee4RSI6>70\u7684\u5c40\u90e8\u6700\u9ad8\u6536\u76d8\u4ef7\u3002 Returns: \u8fd4\u56deint\u7c7b\u578b\u7684\u6574\u6570\uff0c\u8868\u793a\u6700\u540e\u4e00\u4e2a\u6570\u636e\u5230\u6700\u8fd1\u9876\u80cc\u79bb\u53d1\u751f\u70b9\u7684\u8ddd\u79bb\uff1b\u6ca1\u6709\u6ee1\u8db3\u6761\u4ef6\u7684\u9876\u80cc\u79bb\uff0c\u8fd4\u56deNone\u3002 \"\"\" assert len ( close ) >= 60 , \"must provide an array with at least 60 length!\" if close . dtype != np . float64 : close = close . astype ( np . float64 ) rsi = ta . RSI ( close , 6 ) if thresh is None : std = np . std ( close [ - 59 :] / close [ - 60 : - 1 ] - 1 ) thresh = ( 2 * std , - 2 * std ) pivots = peak_valley_pivots ( close , thresh [ 0 ], thresh [ 1 ]) pivots [ 0 ], pivots [ - 1 ] = 0 , 0 length = len ( close ) peak_index = np . 
where (( pivots == 1 ) & ( rsi >= rsi_limit ))[ 0 ] if len ( peak_index ) >= 2 : if ( close [ peak_index [ - 1 ]] > close [ peak_index [ - 2 ]]) and ( rsi [ peak_index [ - 1 ]] < rsi [ peak_index [ - 2 ]] ): top_dev_distance = length - 1 - peak_index [ - 1 ] return top_dev_distance","title":"rsi_top_divergent()"},{"location":"api/talib/#omicron.talib.morph.rsi_watermarks","text":"\u7ed9\u5b9a\u4e00\u6bb5\u884c\u60c5\u6570\u636e\u548c\u7528\u4ee5\u68c0\u6d4b\u9876\u548c\u5e95\u7684\u9608\u503c\uff0c\u8fd4\u56de\u8be5\u6bb5\u884c\u60c5\u4e2d\uff0c\u8c37\u548c\u5cf0\u5904RSI\u5747\u503c\uff0c\u6700\u540e\u4e00\u4e2aRSI6\u503c\u3002 \u5176\u4e2dclose\u7684\u957f\u5ea6\u4e00\u822c\u4e0d\u5c0f\u4e8e60\uff0c\u4e0d\u5927\u4e8e120\u3002\u8fd4\u56de\u503c\u4e2d\uff0c\u4e00\u4e2a\u4e3alow_wartermark\uff08\u8c37\u5e95\u5904RSI\u503c\uff09\uff0c \u4e00\u4e2a\u4e3ahigh_wartermark\uff08\u9ad8\u5cf0\u5904RSI\u503c)\uff0c\u4e00\u4e2a\u4e3aRSI6\u7684\u6700\u540e\u4e00\u4e2a\u503c\uff0c\u7528\u4ee5\u5bf9\u6bd4\u524d\u4e24\u4e2a\u8b66\u6212\u503c\u3002 Parameters: Name Type Description Default close np.array \u5177\u6709\u65f6\u95f4\u5e8f\u5217\u7684\u6536\u76d8\u4ef7 required thresh Tuple[float, float]) None\u9002\u7528\u6240\u6709\u80a1\u7968\uff0c\u4e0d\u5fc5\u66f4\u6539\uff0c\u4e5f\u53ef\u81ea\u884c\u8bbe\u7f6e\u3002 None Returns: Type Description Tuple[float, float, float] \u8fd4\u56de\u6570\u7ec4[low_watermark, high_watermark\uff0c rsi[-1]], \u7b2c\u4e00\u4e2a\u4e3a\u6700\u8fd1\u4e24\u4e2a\u6700\u4f4e\u6536\u76d8\u4ef7\u7684RSI\u5747\u503c\uff0c \u7b2c\u4e8c\u4e2a\u4e3a\u6700\u8fd1\u4e24\u4e2a\u6700\u9ad8\u6536\u76d8\u4ef7\u7684RSI\u5747\u503c\u3002 \u82e5\u4f20\u5165\u6536\u76d8\u4ef7\u53ea\u6709\u4e00\u4e2a\u6700\u503c\uff0c\u53ea\u8fd4\u56de\u4e00\u4e2a\u3002\u6ca1\u6709\u6700\u503c\uff0c\u5219\u8fd4\u56deNone, \u7b2c\u4e09\u4e2a\u4e3a\u5b9e\u9645\u7684\u6700\u540eRSI6\u7684\u503c\u3002 Source code in omicron/talib/morph.py def rsi_watermarks ( close : np . 
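A minimal sketch for rsi_top_divergent, symmetrical to the bottom-divergence sketch above: None means no recent top divergence, otherwise the value is the bar distance to the divergence point.

```python
import numpy as np
from omicron.talib.morph import rsi_top_divergent

np.random.seed(1978)
close = np.cumprod(1 + np.random.randn(100) * 0.01).astype(np.float64)

distance = rsi_top_divergent(close)             # default rsi_limit=70
if distance is not None:
    print(f"top divergence occurred {distance} bars ago")
```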
array , thresh : Tuple [ float , float ] = None ) -> Tuple [ float , float , float ]: \"\"\"\u7ed9\u5b9a\u4e00\u6bb5\u884c\u60c5\u6570\u636e\u548c\u7528\u4ee5\u68c0\u6d4b\u9876\u548c\u5e95\u7684\u9608\u503c\uff0c\u8fd4\u56de\u8be5\u6bb5\u884c\u60c5\u4e2d\uff0c\u8c37\u548c\u5cf0\u5904RSI\u5747\u503c\uff0c\u6700\u540e\u4e00\u4e2aRSI6\u503c\u3002 \u5176\u4e2dclose\u7684\u957f\u5ea6\u4e00\u822c\u4e0d\u5c0f\u4e8e60\uff0c\u4e0d\u5927\u4e8e120\u3002\u8fd4\u56de\u503c\u4e2d\uff0c\u4e00\u4e2a\u4e3alow_wartermark\uff08\u8c37\u5e95\u5904RSI\u503c\uff09\uff0c \u4e00\u4e2a\u4e3ahigh_wartermark\uff08\u9ad8\u5cf0\u5904RSI\u503c)\uff0c\u4e00\u4e2a\u4e3aRSI6\u7684\u6700\u540e\u4e00\u4e2a\u503c\uff0c\u7528\u4ee5\u5bf9\u6bd4\u524d\u4e24\u4e2a\u8b66\u6212\u503c\u3002 Args: close (np.array): \u5177\u6709\u65f6\u95f4\u5e8f\u5217\u7684\u6536\u76d8\u4ef7 thresh (Tuple[float, float]) : None\u9002\u7528\u6240\u6709\u80a1\u7968\uff0c\u4e0d\u5fc5\u66f4\u6539\uff0c\u4e5f\u53ef\u81ea\u884c\u8bbe\u7f6e\u3002 Returns: \u8fd4\u56de\u6570\u7ec4[low_watermark, high_watermark\uff0c rsi[-1]], \u7b2c\u4e00\u4e2a\u4e3a\u6700\u8fd1\u4e24\u4e2a\u6700\u4f4e\u6536\u76d8\u4ef7\u7684RSI\u5747\u503c\uff0c \u7b2c\u4e8c\u4e2a\u4e3a\u6700\u8fd1\u4e24\u4e2a\u6700\u9ad8\u6536\u76d8\u4ef7\u7684RSI\u5747\u503c\u3002 \u82e5\u4f20\u5165\u6536\u76d8\u4ef7\u53ea\u6709\u4e00\u4e2a\u6700\u503c\uff0c\u53ea\u8fd4\u56de\u4e00\u4e2a\u3002\u6ca1\u6709\u6700\u503c\uff0c\u5219\u8fd4\u56deNone, \u7b2c\u4e09\u4e2a\u4e3a\u5b9e\u9645\u7684\u6700\u540eRSI6\u7684\u503c\u3002 \"\"\" assert len ( close ) >= 60 , \"must provide an array with at least 60 length!\" if thresh is None : std = np . std ( close [ - 59 :] / close [ - 60 : - 1 ] - 1 ) thresh = ( 2 * std , - 2 * std ) if close . dtype != np . float64 : close = close . astype ( np . float64 ) rsi = ta . RSI ( close , 6 ) pivots = peak_valley_pivots ( close , thresh [ 0 ], thresh [ 1 ]) pivots [ 0 ], pivots [ - 1 ] = 0 , 0 # \u6390\u5934\u53bb\u5c3e # \u5cf0\u503cRSI>70; \u8c37\u5904\u7684RSI<30; peaks_rsi_index = np . where (( rsi > 70 ) & ( pivots == 1 ))[ 0 ] valleys_rsi_index = np . where (( rsi < 30 ) & ( pivots == - 1 ))[ 0 ] if len ( peaks_rsi_index ) == 0 : high_watermark = None elif len ( peaks_rsi_index ) == 1 : high_watermark = rsi [ peaks_rsi_index [ 0 ]] else : # \u6709\u4e24\u4e2a\u4ee5\u4e0a\u7684\u5cf0\uff0c\u901a\u8fc7\u6700\u8fd1\u7684\u4e24\u4e2a\u5cf0\u5747\u503c\u6765\u786e\u5b9a\u8d70\u52bf high_watermark = np . nanmean ( rsi [ peaks_rsi_index [ - 2 :]]) if len ( valleys_rsi_index ) == 0 : low_watermark = None elif len ( valleys_rsi_index ) == 1 : low_watermark = rsi [ valleys_rsi_index [ 0 ]] else : # \u6709\u4e24\u4e2a\u4ee5\u4e0a\u7684\u5cf0\uff0c\u901a\u8fc7\u6700\u8fd1\u7684\u4e24\u4e2a\u5cf0\u6765\u786e\u5b9a\u8d70\u52bf low_watermark = np . nanmean ( rsi [ valleys_rsi_index [ - 2 :]]) return low_watermark , high_watermark , rsi [ - 1 ]","title":"rsi_watermarks()"},{"location":"api/talib/#omicron.talib.morph.support_resist_lines","text":"\u8ba1\u7b97\u65f6\u95f4\u5e8f\u5217\u7684\u652f\u6491\u7ebf\u548c\u963b\u529b\u7ebf \u4f7f\u7528\u6700\u8fd1\u7684\u4e24\u4e2a\u9ad8\u70b9\u8fde\u63a5\u6210\u9634\u529b\u7ebf\uff0c\u4e24\u4e2a\u4f4e\u70b9\u8fde\u63a5\u6210\u652f\u6491\u7ebf\u3002 Examples: 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 def show_support_resist_lines ( ts ): import plotly.graph_objects as go fig = go . Figure () support , resist , x_start = support_resist_lines ( ts , 0.03 , - 0.03 ) fig . add_trace ( go . Scatter ( x = np . arange ( len ( ts )), y = ts )) x = np . 
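A minimal sketch for rsi_watermarks: it always returns a 3-tuple, but the low/high watermark slots are None when the window contains no qualifying valley or peak, so the last RSI6 value is compared against them defensively.

```python
import numpy as np
from omicron.talib.morph import rsi_watermarks

np.random.seed(1978)
close = np.cumprod(1 + np.random.randn(100) * 0.01).astype(np.float64)

low_wm, high_wm, last_rsi = rsi_watermarks(close)
if low_wm is not None and last_rsi <= low_wm:
    print("RSI6 is at or below the recent valley watermark")
if high_wm is not None and last_rsi >= high_wm:
    print("RSI6 is at or above the recent peak watermark")
```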
arange ( len ( ts ))[ x_start :] fig . add_trace ( go . Line ( x = x , y = support ( x ))) fig . add_trace ( go . Line ( x = x , y = resist ( x ))) fig . show () np . random . seed ( 1978 ) X = np . cumprod ( 1 + np . random . randn ( 100 ) * 0.01 ) show_support_resist_lines ( X ) the above code will show this Parameters: Name Type Description Default ts np.ndarray \u65f6\u95f4\u5e8f\u5217 required upthres float \u8bf7\u53c2\u8003 peaks_and_valleys None downthres float \u8bf7\u53c2\u8003 peaks_and_valleys None Returns: Type Description Tuple[Callable, Callable, numpy.ndarray] \u8fd4\u56de\u652f\u6491\u7ebf\u548c\u963b\u529b\u7ebf\u7684\u8ba1\u7b97\u51fd\u6570\u53ca\u8d77\u59cb\u70b9\u5750\u6807\uff0c\u5982\u679c\u6ca1\u6709\u652f\u6491\u7ebf\u6216\u963b\u529b\u7ebf\uff0c\u5219\u8fd4\u56deNone Source code in omicron/talib/morph.py def support_resist_lines ( ts : np . ndarray , upthres : float = None , downthres : float = None ) -> Tuple [ Callable , Callable , np . ndarray ]: \"\"\"\u8ba1\u7b97\u65f6\u95f4\u5e8f\u5217\u7684\u652f\u6491\u7ebf\u548c\u963b\u529b\u7ebf \u4f7f\u7528\u6700\u8fd1\u7684\u4e24\u4e2a\u9ad8\u70b9\u8fde\u63a5\u6210\u9634\u529b\u7ebf\uff0c\u4e24\u4e2a\u4f4e\u70b9\u8fde\u63a5\u6210\u652f\u6491\u7ebf\u3002 Examples: ```python def show_support_resist_lines(ts): import plotly.graph_objects as go fig = go.Figure() support, resist, x_start = support_resist_lines(ts, 0.03, -0.03) fig.add_trace(go.Scatter(x=np.arange(len(ts)), y=ts)) x = np.arange(len(ts))[x_start:] fig.add_trace(go.Line(x=x, y = support(x))) fig.add_trace(go.Line(x=x, y = resist(x))) fig.show() np.random.seed(1978) X = np.cumprod(1 + np.random.randn(100) * 0.01) show_support_resist_lines(X) ``` the above code will show this ![](https://images.jieyu.ai/images/202204/support_resist.png) Args: ts (np.ndarray): \u65f6\u95f4\u5e8f\u5217 upthres (float, optional): \u8bf7\u53c2\u8003[peaks_and_valleys][omicron.talib.morph.peaks_and_valleys] downthres (float, optional): \u8bf7\u53c2\u8003[peaks_and_valleys][omicron.talib.morph.peaks_and_valleys] Returns: \u8fd4\u56de\u652f\u6491\u7ebf\u548c\u963b\u529b\u7ebf\u7684\u8ba1\u7b97\u51fd\u6570\u53ca\u8d77\u59cb\u70b9\u5750\u6807\uff0c\u5982\u679c\u6ca1\u6709\u652f\u6491\u7ebf\u6216\u963b\u529b\u7ebf\uff0c\u5219\u8fd4\u56deNone \"\"\" if ts . dtype != np . float64 : ts = ts . astype ( np . float64 ) pivots = peaks_and_valleys ( ts , upthres , downthres ) pivots [ 0 ] = 0 pivots [ - 1 ] = 0 arg_max = np . argwhere ( pivots == 1 ) . flatten () arg_min = np . argwhere ( pivots == - 1 ) . flatten () resist = None support = None if len ( arg_max ) >= 2 : arg_max = arg_max [ - 2 :] y = ts [ arg_max ] coeff = np . polyfit ( arg_max , y , deg = 1 ) resist = np . poly1d ( coeff ) if len ( arg_min ) >= 2 : arg_min = arg_min [ - 2 :] y = ts [ arg_min ] coeff = np . polyfit ( arg_min , y , deg = 1 ) support = np . poly1d ( coeff ) return support , resist , np . 
min ([ * arg_min , * arg_max ])","title":"support_resist_lines()"},{"location":"api/talib/#omicron.talib.morph.valley_detect","text":"\u7ed9\u5b9a\u4e00\u6bb5\u884c\u60c5\u6570\u636e\u548c\u7528\u4ee5\u68c0\u6d4b\u8fd1\u671f\u5df2\u53d1\u751f\u53cd\u8f6c\u7684\u6700\u4f4e\u70b9\uff0c\u8fd4\u56de\u8be5\u6bb5\u884c\u60c5\u4e2d\uff0c\u6700\u4f4e\u70b9\u5230\u6700\u540e\u4e00\u4e2a\u6570\u636e\u7684\u8ddd\u79bb\u548c\u6536\u76ca\u7387\u6570\u7ec4\uff0c \u5982\u679c\u7ed9\u5b9a\u884c\u60c5\u4e2d\u672a\u627e\u5230\u6ee1\u8db3\u53c2\u6570\u7684\u6700\u4f4e\u70b9\uff0c\u5219\u8fd4\u56de\u4e24\u4e2a\u7a7a\u503c\u6570\u7ec4\u3002 \u5176\u4e2dbars\u7684\u957f\u5ea6\u4e00\u822c\u4e0d\u5c0f\u4e8e60\uff0c\u4e0d\u5927\u4e8e120\u3002\u6b64\u51fd\u6570\u91c7\u7528\u4e86zigzag\u4e2d\u7684\u8c37\u5cf0\u68c0\u6d4b\u65b9\u6cd5\uff0c\u5176\u4e2d\u53c2\u6570\u9ed8\u8ba4(0.05,-0.02), \u6b64\u53c2\u6570\u5bf9\u6240\u6709\u80a1\u7968\u6570\u636e\u90fd\u9002\u7528\u3002\u82e5\u6ee1\u8db3\u53c2\u6570\uff0c\u8fd4\u56de\u503c\u4e2d\uff0c\u8ddd\u79bb\u4e3a\u5927\u4e8e0\u7684\u6574\u6570\uff0c\u6536\u76ca\u7387\u662f0~1\u7684\u5c0f\u6570\u3002 Parameters: Name Type Description Default close np.ndarray \u5177\u6709\u65f6\u95f4\u5e8f\u5217\u7684\u6536\u76d8\u4ef7 required thresh Tuple[float, float]) \u8bf7\u53c2\u8003 peaks_and_valleys (0.05, -0.02) Returns: Type Description int \u8fd4\u56de\u8be5\u6bb5\u884c\u60c5\u4e2d\uff0c\u6700\u4f4e\u70b9\u5230\u6700\u540e\u4e00\u4e2a\u6570\u636e\u7684\u8ddd\u79bb\u548c\u6536\u76ca\u7387\u6570\u7ec4\uff0c \u5982\u679c\u7ed9\u5b9a\u884c\u60c5\u4e2d\u672a\u627e\u5230\u6ee1\u8db3\u53c2\u6570\u7684\u6700\u4f4e\u70b9\uff0c\u5219\u8fd4\u56de\u4e24\u4e2a\u7a7a\u503c\u6570\u7ec4\u3002 Source code in omicron/talib/morph.py def valley_detect ( close : np . ndarray , thresh : Tuple [ float , float ] = ( 0.05 , - 0.02 ) ) -> int : \"\"\"\u7ed9\u5b9a\u4e00\u6bb5\u884c\u60c5\u6570\u636e\u548c\u7528\u4ee5\u68c0\u6d4b\u8fd1\u671f\u5df2\u53d1\u751f\u53cd\u8f6c\u7684\u6700\u4f4e\u70b9\uff0c\u8fd4\u56de\u8be5\u6bb5\u884c\u60c5\u4e2d\uff0c\u6700\u4f4e\u70b9\u5230\u6700\u540e\u4e00\u4e2a\u6570\u636e\u7684\u8ddd\u79bb\u548c\u6536\u76ca\u7387\u6570\u7ec4\uff0c \u5982\u679c\u7ed9\u5b9a\u884c\u60c5\u4e2d\u672a\u627e\u5230\u6ee1\u8db3\u53c2\u6570\u7684\u6700\u4f4e\u70b9\uff0c\u5219\u8fd4\u56de\u4e24\u4e2a\u7a7a\u503c\u6570\u7ec4\u3002 \u5176\u4e2dbars\u7684\u957f\u5ea6\u4e00\u822c\u4e0d\u5c0f\u4e8e60\uff0c\u4e0d\u5927\u4e8e120\u3002\u6b64\u51fd\u6570\u91c7\u7528\u4e86zigzag\u4e2d\u7684\u8c37\u5cf0\u68c0\u6d4b\u65b9\u6cd5\uff0c\u5176\u4e2d\u53c2\u6570\u9ed8\u8ba4(0.05,-0.02), \u6b64\u53c2\u6570\u5bf9\u6240\u6709\u80a1\u7968\u6570\u636e\u90fd\u9002\u7528\u3002\u82e5\u6ee1\u8db3\u53c2\u6570\uff0c\u8fd4\u56de\u503c\u4e2d\uff0c\u8ddd\u79bb\u4e3a\u5927\u4e8e0\u7684\u6574\u6570\uff0c\u6536\u76ca\u7387\u662f0~1\u7684\u5c0f\u6570\u3002 Args: close (np.ndarray): \u5177\u6709\u65f6\u95f4\u5e8f\u5217\u7684\u6536\u76d8\u4ef7 thresh (Tuple[float, float]) : \u8bf7\u53c2\u8003[peaks_and_valleys][omicron.talib.morph.peaks_and_valleys] Returns: \u8fd4\u56de\u8be5\u6bb5\u884c\u60c5\u4e2d\uff0c\u6700\u4f4e\u70b9\u5230\u6700\u540e\u4e00\u4e2a\u6570\u636e\u7684\u8ddd\u79bb\u548c\u6536\u76ca\u7387\u6570\u7ec4\uff0c \u5982\u679c\u7ed9\u5b9a\u884c\u60c5\u4e2d\u672a\u627e\u5230\u6ee1\u8db3\u53c2\u6570\u7684\u6700\u4f4e\u70b9\uff0c\u5219\u8fd4\u56de\u4e24\u4e2a\u7a7a\u503c\u6570\u7ec4\u3002 \"\"\" assert len ( close ) >= 60 , \"must provide an array with at least 60 length!\" if close . dtype != np . float64 : close = close . 
astype ( np . float64 ) if thresh is None : std = np . std ( close [ - 59 :] / close [ - 60 : - 1 ] - 1 ) thresh = ( 2 * std , - 2 * std ) pivots = peak_valley_pivots ( close , thresh [ 0 ], thresh [ 1 ]) flags = pivots [ pivots != 0 ] increased = None lowest_distance = None if ( flags [ - 2 ] == - 1 ) and ( flags [ - 1 ] == 1 ): length = len ( pivots ) valley_index = np . where ( pivots == - 1 )[ 0 ] increased = ( close [ - 1 ] - close [ valley_index [ - 1 ]]) / close [ valley_index [ - 1 ]] lowest_distance = int ( length - 1 - valley_index [ - 1 ]) return lowest_distance , increased","title":"valley_detect()"},{"location":"api/talib/#omicron.talib.morph.vcross","text":"\u5224\u65ad\u5e8f\u5217f\u662f\u5426\u4e0eg\u5b58\u5728\u7c7b\u578bv\u578b\u7684\u76f8\u4ea4\u3002\u5373\u5b58\u5728\u4e24\u4e2a\u4ea4\u70b9\uff0c\u7b2c\u4e00\u4e2a\u4ea4\u70b9\u4e3a\u5411\u4e0b\u76f8\u4ea4\uff0c\u7b2c\u4e8c\u4e2a\u4ea4\u70b9\u4e3a\u5411\u4e0a \u76f8\u4ea4\u3002\u4e00\u822c\u53cd\u6620\u4e3a\u6d17\u76d8\u62c9\u5347\u7684\u7279\u5f81\u3002 Examples: >>> f = np . array ([ 3 * i ** 2 - 20 * i + 2 for i in range ( 10 )]) >>> g = np . array ([ i - 5 for i in range ( 10 )]) >>> flag , indices = vcross ( f , g ) >>> assert flag is True >>> assert indices [ 0 ] == 0 >>> assert indices [ 1 ] == 6 Parameters: Name Type Description Default f first sequence required g the second sequence required Returns: Type Description Tuple (flag, indices), \u5176\u4e2dflag\u53d6\u503c\u4e3aTrue\u65f6\uff0c\u5b58\u5728vcross\uff0cindices\u4e3a\u4ea4\u70b9\u7684\u7d22\u5f15\u3002 Source code in omicron/talib/morph.py def vcross ( f : np . array , g : np . array ) -> Tuple : \"\"\"\u5224\u65ad\u5e8f\u5217f\u662f\u5426\u4e0eg\u5b58\u5728\u7c7b\u578bv\u578b\u7684\u76f8\u4ea4\u3002\u5373\u5b58\u5728\u4e24\u4e2a\u4ea4\u70b9\uff0c\u7b2c\u4e00\u4e2a\u4ea4\u70b9\u4e3a\u5411\u4e0b\u76f8\u4ea4\uff0c\u7b2c\u4e8c\u4e2a\u4ea4\u70b9\u4e3a\u5411\u4e0a \u76f8\u4ea4\u3002\u4e00\u822c\u53cd\u6620\u4e3a\u6d17\u76d8\u62c9\u5347\u7684\u7279\u5f81\u3002 Examples: >>> f = np.array([ 3 * i ** 2 - 20 * i + 2 for i in range(10)]) >>> g = np.array([ i - 5 for i in range(10)]) >>> flag, indices = vcross(f, g) >>> assert flag is True >>> assert indices[0] == 0 >>> assert indices[1] == 6 Args: f: first sequence g: the second sequence Returns: (flag, indices), \u5176\u4e2dflag\u53d6\u503c\u4e3aTrue\u65f6\uff0c\u5b58\u5728vcross\uff0cindices\u4e3a\u4ea4\u70b9\u7684\u7d22\u5f15\u3002 \"\"\" indices = np . argwhere ( np . diff ( np . sign ( f - g ))) . flatten () if len ( indices ) == 2 : idx0 , idx1 = indices if f [ idx0 ] > g [ idx0 ] and f [ idx1 ] < g [ idx1 ]: return True , ( idx0 , idx1 ) return False , ( None , None )","title":"vcross()"},{"location":"api/timeframe/","text":"TimeFrame \u00b6 Source code in omicron/models/timeframe.py class TimeFrame : minute_level_frames = [ FrameType . MIN1 , FrameType . MIN5 , FrameType . MIN15 , FrameType . MIN30 , FrameType . MIN60 , ] day_level_frames = [ FrameType . DAY , FrameType . WEEK , FrameType . MONTH , FrameType . QUARTER , FrameType . YEAR , ] ticks = { FrameType . MIN1 : [ i for i in itertools . chain ( range ( 571 , 691 ), range ( 781 , 901 ))], FrameType . MIN5 : [ i for i in itertools . chain ( range ( 575 , 695 , 5 ), range ( 785 , 905 , 5 )) ], FrameType . MIN15 : [ i for i in itertools . chain ( range ( 585 , 705 , 15 ), range ( 795 , 915 , 15 )) ], FrameType . 
MIN30 : [ int ( s [: 2 ]) * 60 + int ( s [ 2 :]) for s in [ \"1000\" , \"1030\" , \"1100\" , \"1130\" , \"1330\" , \"1400\" , \"1430\" , \"1500\" ] ], FrameType . MIN60 : [ int ( s [: 2 ]) * 60 + int ( s [ 2 :]) for s in [ \"1030\" , \"1130\" , \"1400\" , \"1500\" ] ], } day_frames = None week_frames = None month_frames = None quarter_frames = None year_frames = None @classmethod def service_degrade ( cls ): \"\"\"\u5f53cache\u4e2d\u4e0d\u5b58\u5728\u65e5\u5386\u65f6\uff0c\u542f\u7528\u968fomicron\u7248\u672c\u4e00\u8d77\u53d1\u884c\u65f6\u81ea\u5e26\u7684\u65e5\u5386\u3002 \u6ce8\u610f\uff1a\u968fomicron\u7248\u672c\u4e00\u8d77\u53d1\u884c\u65f6\u81ea\u5e26\u7684\u65e5\u5386\u5f88\u53ef\u80fd\u4e0d\u662f\u6700\u65b0\u7684\uff0c\u5e76\u4e14\u53ef\u80fd\u5305\u542b\u9519\u8bef\u3002\u6bd4\u5982\uff0c\u5b58\u5728\u8fd9\u6837\u7684\u60c5\u51b5\uff0c\u5728\u672c\u7248\u672c\u7684omicron\u53d1\u884c\u65f6\uff0c\u65e5\u5386\u66f4\u65b0\u5230\u4e862021\u5e7412\u670831\u65e5\uff0c\u5728\u8fd9\u4e4b\u524d\u7684\u65e5\u5386\u90fd\u662f\u51c6\u786e\u7684\uff0c\u4f46\u5728\u6b64\u4e4b\u540e\u7684\u65e5\u5386\uff0c\u5219\u6709\u53ef\u80fd\u51fa\u73b0\u9519\u8bef\u3002\u56e0\u6b64\uff0c\u53ea\u5e94\u8be5\u5728\u7279\u6b8a\u7684\u60c5\u51b5\u4e0b\uff08\u6bd4\u5982\u6d4b\u8bd5\uff09\u8c03\u7528\u6b64\u65b9\u6cd5\uff0c\u4ee5\u83b7\u5f97\u4e00\u4e2a\u964d\u7ea7\u7684\u670d\u52a1\u3002 \"\"\" _dir = os . path . dirname ( __file__ ) file = os . path . join ( _dir , \"..\" , \"config\" , \"calendar.json\" ) with open ( file , \"r\" ) as f : data = json . load ( f ) for k , v in data . items (): setattr ( cls , k , np . array ( v )) @classmethod async def _load_calendar ( cls ): \"\"\"\u4ece\u6570\u636e\u7f13\u5b58\u4e2d\u52a0\u8f7d\u66f4\u65b0\u65e5\u5386\"\"\" from omicron import cache names = [ \"day_frames\" , \"week_frames\" , \"month_frames\" , \"quarter_frames\" , \"year_frames\" , ] for name , frame_type in zip ( names , cls . day_level_frames ): key = f \"calendar: { frame_type . value } \" result = await cache . security . lrange ( key , 0 , - 1 ) if result is not None and len ( result ): frames = [ int ( x ) for x in result ] setattr ( cls , name , np . array ( frames )) else : # pragma: no cover raise DataNotReadyError ( f \"calendar data is not ready: { name } missed\" ) @classmethod async def init ( cls ): \"\"\"\u521d\u59cb\u5316\u65e5\u5386\"\"\" await cls . _load_calendar () @classmethod def int2time ( cls , tm : int ) -> datetime . datetime : \"\"\"\u5c06\u6574\u6570\u8868\u793a\u7684\u65f6\u95f4\u8f6c\u6362\u4e3a`datetime`\u7c7b\u578b\u8868\u793a examples: >>> TimeFrame.int2time(202005011500) datetime.datetime(2020, 5, 1, 15, 0) Args: tm: time in YYYYMMDDHHmm format Returns: \u8f6c\u6362\u540e\u7684\u65f6\u95f4 \"\"\" s = str ( tm ) # its 8 times faster than arrow.get() return datetime . datetime ( int ( s [: 4 ]), int ( s [ 4 : 6 ]), int ( s [ 6 : 8 ]), int ( s [ 8 : 10 ]), int ( s [ 10 : 12 ]) ) @classmethod def time2int ( cls , tm : Union [ datetime . datetime , Arrow ]) -> int : \"\"\"\u5c06\u65f6\u95f4\u7c7b\u578b\u8f6c\u6362\u4e3a\u6574\u6570\u7c7b\u578b tm\u53ef\u4ee5\u662fArrow\u7c7b\u578b\uff0c\u4e5f\u53ef\u4ee5\u662fdatetime.datetime\u6216\u8005\u4efb\u4f55\u5176\u5b83\u7c7b\u578b\uff0c\u53ea\u8981\u5b83\u6709year,month...\u7b49 \u5c5e\u6027 Examples: >>> TimeFrame.time2int(datetime.datetime(2020, 5, 1, 15)) 202005011500 Args: tm: Returns: \u8f6c\u6362\u540e\u7684\u6574\u6570\uff0c\u6bd4\u59822020050115 \"\"\" return int ( f \" { tm . year : 04 }{ tm . month : 02 }{ tm . 
day : 02 }{ tm . hour : 02 }{ tm . minute : 02 } \" ) @classmethod def date2int ( cls , d : Union [ datetime . datetime , datetime . date , Arrow ]) -> int : \"\"\"\u5c06\u65e5\u671f\u8f6c\u6362\u4e3a\u6574\u6570\u8868\u793a \u5728zillionare\u4e2d\uff0c\u5982\u679c\u8981\u5bf9\u65f6\u95f4\u548c\u65e5\u671f\u8fdb\u884c\u6301\u4e45\u5316\u64cd\u4f5c\uff0c\u6211\u4eec\u4e00\u822c\u5c06\u5176\u8f6c\u6362\u4e3aint\u7c7b\u578b Examples: >>> TimeFrame.date2int(datetime.date(2020,5,1)) 20200501 Args: d: date Returns: \u65e5\u671f\u7684\u6574\u6570\u8868\u793a\uff0c\u6bd4\u598220220211 \"\"\" return int ( f \" { d . year : 04 }{ d . month : 02 }{ d . day : 02 } \" ) @classmethod def int2date ( cls , d : Union [ int , str ]) -> datetime . date : \"\"\"\u5c06\u6570\u5b57\u8868\u793a\u7684\u65e5\u671f\u8f6c\u6362\u6210\u4e3a\u65e5\u671f\u683c\u5f0f Examples: >>> TimeFrame.int2date(20200501) datetime.date(2020, 5, 1) Args: d: YYYYMMDD\u8868\u793a\u7684\u65e5\u671f Returns: \u8f6c\u6362\u540e\u7684\u65e5\u671f \"\"\" s = str ( d ) # it's 8 times faster than arrow.get return datetime . date ( int ( s [: 4 ]), int ( s [ 4 : 6 ]), int ( s [ 6 :])) @classmethod def day_shift ( cls , start : datetime . date , offset : int ) -> datetime . date : \"\"\"\u5bf9\u6307\u5b9a\u65e5\u671f\u8fdb\u884c\u524d\u540e\u79fb\u4f4d\u64cd\u4f5c \u5982\u679c n == 0\uff0c\u5219\u8fd4\u56ded\u5bf9\u5e94\u7684\u4ea4\u6613\u65e5\uff08\u5982\u679c\u662f\u975e\u4ea4\u6613\u65e5\uff0c\u5219\u8fd4\u56de\u521a\u7ed3\u675f\u7684\u4e00\u4e2a\u4ea4\u6613\u65e5\uff09 \u5982\u679c n > 0\uff0c\u5219\u8fd4\u56ded\u5bf9\u5e94\u7684\u4ea4\u6613\u65e5\u540e\u7b2c n \u4e2a\u4ea4\u6613\u65e5 \u5982\u679c n < 0\uff0c\u5219\u8fd4\u56ded\u5bf9\u5e94\u7684\u4ea4\u6613\u65e5\u524d\u7b2c n \u4e2a\u4ea4\u6613\u65e5 Examples: >>> TimeFrame.day_frames = [20191212, 20191213, 20191216, 20191217,20191218, 20191219] >>> TimeFrame.day_shift(datetime.date(2019,12,13), 0) datetime.date(2019, 12, 13) >>> TimeFrame.day_shift(datetime.date(2019, 12, 15), 0) datetime.date(2019, 12, 13) >>> TimeFrame.day_shift(datetime.date(2019, 12, 15), 1) datetime.date(2019, 12, 16) >>> TimeFrame.day_shift(datetime.date(2019, 12, 13), 1) datetime.date(2019, 12, 16) Args: start: the origin day offset: days to shift, can be negative Returns: \u79fb\u4f4d\u540e\u7684\u65e5\u671f \"\"\" # accelerated from 0.12 to 0.07, per 10000 loop, type conversion time included start = cls . date2int ( start ) return cls . int2date ( ext . shift ( cls . day_frames , start , offset )) @classmethod def week_shift ( cls , start : datetime . date , offset : int ) -> datetime . date : \"\"\"\u5bf9\u6307\u5b9a\u65e5\u671f\u6309\u5468\u7ebf\u5e27\u8fdb\u884c\u524d\u540e\u79fb\u4f4d\u64cd\u4f5c \u53c2\u8003 [omicron.models.timeframe.TimeFrame.day_shift][] Examples: >>> TimeFrame.week_frames = np.array([20200103, 20200110, 20200117, 20200123,20200207, 20200214]) >>> moment = arrow.get('2020-1-21').date() >>> TimeFrame.week_shift(moment, 1) datetime.date(2020, 1, 23) >>> TimeFrame.week_shift(moment, 0) datetime.date(2020, 1, 17) >>> TimeFrame.week_shift(moment, -1) datetime.date(2020, 1, 10) Returns: \u79fb\u4f4d\u540e\u7684\u65e5\u671f \"\"\" start = cls . date2int ( start ) return cls . int2date ( ext . shift ( cls . week_frames , start , offset )) @classmethod def month_shift ( cls , start : datetime . date , offset : int ) -> datetime . 
date : \"\"\"\u6c42`start`\u6240\u5728\u7684\u6708\u79fb\u4f4d\u540e\u7684frame \u672c\u51fd\u6570\u9996\u5148\u5c06`start`\u5bf9\u9f50\uff0c\u7136\u540e\u8fdb\u884c\u79fb\u4f4d\u3002 Examples: >>> TimeFrame.month_frames = np.array([20150130, 20150227, 20150331, 20150430]) >>> TimeFrame.month_shift(arrow.get('2015-2-26').date(), 0) datetime.date(2015, 1, 30) >>> TimeFrame.month_shift(arrow.get('2015-2-27').date(), 0) datetime.date(2015, 2, 27) >>> TimeFrame.month_shift(arrow.get('2015-3-1').date(), 0) datetime.date(2015, 2, 27) >>> TimeFrame.month_shift(arrow.get('2015-3-1').date(), 1) datetime.date(2015, 3, 31) Returns: \u79fb\u4f4d\u540e\u7684\u65e5\u671f \"\"\" start = cls . date2int ( start ) return cls . int2date ( ext . shift ( cls . month_frames , start , offset )) @classmethod def get_ticks ( cls , frame_type : FrameType ) -> Union [ List , np . array ]: \"\"\"\u53d6\u6708\u7ebf\u3001\u5468\u7ebf\u3001\u65e5\u7ebf\u53ca\u5404\u5206\u949f\u7ebf\u5bf9\u5e94\u7684frame \u5bf9\u5206\u949f\u7ebf\uff0c\u8fd4\u56de\u503c\u4ec5\u5305\u542b\u65f6\u95f4\uff0c\u4e0d\u5305\u542b\u65e5\u671f\uff08\u5747\u4e3a\u6574\u6570\u8868\u793a\uff09 Examples: >>> TimeFrame.month_frames = np.array([20050131, 20050228, 20050331]) >>> TimeFrame.get_ticks(FrameType.MONTH)[:3] array([20050131, 20050228, 20050331]) Args: frame_type : [description] Raises: ValueError: [description] Returns: \u6708\u7ebf\u3001\u5468\u7ebf\u3001\u65e5\u7ebf\u53ca\u5404\u5206\u949f\u7ebf\u5bf9\u5e94\u7684frame \"\"\" if frame_type in cls . minute_level_frames : return cls . ticks [ frame_type ] if frame_type == FrameType . DAY : return cls . day_frames elif frame_type == FrameType . WEEK : return cls . week_frames elif frame_type == FrameType . MONTH : return cls . month_frames else : # pragma: no cover raise ValueError ( f \" { frame_type } not supported!\" ) @classmethod def shift ( cls , moment : Union [ Arrow , datetime . date , datetime . datetime ], n : int , frame_type : FrameType , ) -> Union [ datetime . date , datetime . datetime ]: \"\"\"\u5c06\u6307\u5b9a\u7684moment\u79fb\u52a8N\u4e2a`frame_type`\u4f4d\u7f6e\u3002 \u5f53N\u4e3a\u8d1f\u6570\u65f6\uff0c\u610f\u5473\u7740\u5411\u524d\u79fb\u52a8\uff1b\u5f53N\u4e3a\u6b63\u6570\u65f6\uff0c\u610f\u5473\u7740\u5411\u540e\u79fb\u52a8\u3002\u5982\u679cn\u4e3a\u96f6\uff0c\u610f\u5473\u7740\u79fb\u52a8\u5230\u6700\u63a5\u8fd1 \u7684\u4e00\u4e2a\u5df2\u7ed3\u675f\u7684frame\u3002 \u5982\u679cmoment\u6ca1\u6709\u5bf9\u9f50\u5230frame_type\u5bf9\u5e94\u7684\u65f6\u95f4\uff0c\u5c06\u9996\u5148\u8fdb\u884c\u5bf9\u9f50\u3002 See also: - [day_shift][omicron.models.timeframe.TimeFrame.day_shift] - [week_shift][omicron.models.timeframe.TimeFrame.week_shift] - [month_shift][omicron.models.timeframe.TimeFrame.month_shift] Examples: >>> TimeFrame.shift(datetime.date(2020, 1, 3), 1, FrameType.DAY) datetime.date(2020, 1, 6) >>> TimeFrame.shift(datetime.datetime(2020, 1, 6, 11), 1, FrameType.MIN30) datetime.datetime(2020, 1, 6, 11, 30) Args: moment: n: frame_type: Returns: \u79fb\u4f4d\u540e\u7684Frame \"\"\" if frame_type == FrameType . DAY : return cls . day_shift ( moment , n ) elif frame_type == FrameType . WEEK : return cls . week_shift ( moment , n ) elif frame_type == FrameType . MONTH : return cls . month_shift ( moment , n ) elif frame_type in [ FrameType . MIN1 , FrameType . MIN5 , FrameType . MIN15 , FrameType . MIN30 , FrameType . MIN60 , ]: tm = moment . hour * 60 + moment . minute new_tick_pos = cls . ticks [ frame_type ] . index ( tm ) + n days = new_tick_pos // len ( cls . 
ticks [ frame_type ]) min_part = new_tick_pos % len ( cls . ticks [ frame_type ]) date_part = cls . day_shift ( moment . date (), days ) minutes = cls . ticks [ frame_type ][ min_part ] h , m = minutes // 60 , minutes % 60 return datetime . datetime ( date_part . year , date_part . month , date_part . day , h , m , tzinfo = moment . tzinfo , ) else : # pragma: no cover raise ValueError ( f \" { frame_type } is not supported.\" ) @classmethod def count_day_frames ( cls , start : Union [ datetime . date , Arrow ], end : Union [ datetime . date , Arrow ] ) -> int : \"\"\"calc trade days between start and end in close-to-close way. if start == end, this will returns 1. Both start/end will be aligned to open trade day before calculation. Examples: >>> start = datetime.date(2019, 12, 21) >>> end = datetime.date(2019, 12, 21) >>> TimeFrame.day_frames = [20191219, 20191220, 20191223, 20191224, 20191225] >>> TimeFrame.count_day_frames(start, end) 1 >>> # non-trade days are removed >>> TimeFrame.day_frames = [20200121, 20200122, 20200123, 20200203, 20200204, 20200205] >>> start = datetime.date(2020, 1, 23) >>> end = datetime.date(2020, 2, 4) >>> TimeFrame.count_day_frames(start, end) 3 args: start: end: returns: count of days \"\"\" start = cls . date2int ( start ) end = cls . date2int ( end ) return int ( ext . count_between ( cls . day_frames , start , end )) @classmethod def count_week_frames ( cls , start : datetime . date , end : datetime . date ) -> int : \"\"\" calc trade weeks between start and end in close-to-close way. Both start and end will be aligned to open trade day before calculation. After that, if start == end, this will returns 1 for examples, please refer to [count_day_frames][omicron.models.timeframe.TimeFrame.count_day_frames] args: start: end: returns: count of weeks \"\"\" start = cls . date2int ( start ) end = cls . date2int ( end ) return int ( ext . count_between ( cls . week_frames , start , end )) @classmethod def count_month_frames ( cls , start : datetime . date , end : datetime . date ) -> int : \"\"\"calc trade months between start and end date in close-to-close way Both start and end will be aligned to open trade day before calculation. After that, if start == end, this will returns 1. For examples, please refer to [count_day_frames][omicron.models.timeframe.TimeFrame.count_day_frames] Args: start: end: Returns: months between start and end \"\"\" start = cls . date2int ( start ) end = cls . date2int ( end ) return int ( ext . count_between ( cls . month_frames , start , end )) @classmethod def count_quarter_frames ( cls , start : datetime . date , end : datetime . date ) -> int : \"\"\"calc trade quarters between start and end date in close-to-close way Both start and end will be aligned to open trade day before calculation. After that, if start == end, this will returns 1. For examples, please refer to [count_day_frames][omicron.models.timeframe.TimeFrame.count_day_frames] Args: start (datetime.date): [description] end (datetime.date): [description] Returns: quarters between start and end \"\"\" start = cls . date2int ( start ) end = cls . date2int ( end ) return int ( ext . count_between ( cls . quarter_frames , start , end )) @classmethod def count_year_frames ( cls , start : datetime . date , end : datetime . date ) -> int : \"\"\"calc trade years between start and end date in close-to-close way Both start and end will be aligned to open trade day before calculation. After that, if start == end, this will returns 1. 
For examples, please refer to [count_day_frames][omicron.models.timeframe.TimeFrame.count_day_frames] Args: start (datetime.date): [description] end (datetime.date): [description] Returns: years between start and end \"\"\" start = cls . date2int ( start ) end = cls . date2int ( end ) return int ( ext . count_between ( cls . year_frames , start , end )) @classmethod def count_frames ( cls , start : Union [ datetime . date , datetime . datetime , Arrow ], end : Union [ datetime . date , datetime . datetime , Arrow ], frame_type , ) -> int : \"\"\"\u8ba1\u7b97start\u4e0eend\u4e4b\u95f4\u6709\u591a\u5c11\u4e2a\u5468\u671f\u4e3aframe_type\u7684frames See also: - [count_day_frames][omicron.models.timeframe.TimeFrame.count_day_frames] - [count_week_frames][omicron.models.timeframe.TimeFrame.count_week_frames] - [count_month_frames][omicron.models.timeframe.TimeFrame.count_month_frames] Args: start : start frame end : end frame frame_type : the type of frame Raises: ValueError: \u5982\u679cframe_type\u4e0d\u652f\u6301\uff0c\u5219\u4f1a\u629b\u51fa\u6b64\u5f02\u5e38\u3002 Returns: \u4ecestart\u5230end\u7684\u5e27\u6570 \"\"\" if frame_type == FrameType . DAY : return cls . count_day_frames ( start , end ) elif frame_type == FrameType . WEEK : return cls . count_week_frames ( start , end ) elif frame_type == FrameType . MONTH : return cls . count_month_frames ( start , end ) elif frame_type == FrameType . QUARTER : return cls . count_quarter_frames ( start , end ) elif frame_type == FrameType . YEAR : return cls . count_year_frames ( start , end ) elif frame_type in [ FrameType . MIN1 , FrameType . MIN5 , FrameType . MIN15 , FrameType . MIN30 , FrameType . MIN60 , ]: tm_start = start . hour * 60 + start . minute tm_end = end . hour * 60 + end . minute days = cls . count_day_frames ( start . date (), end . date ()) - 1 tm_start_pos = cls . ticks [ frame_type ] . index ( tm_start ) tm_end_pos = cls . ticks [ frame_type ] . index ( tm_end ) min_bars = tm_end_pos - tm_start_pos + 1 return days * len ( cls . ticks [ frame_type ]) + min_bars else : # pragma: no cover raise ValueError ( f \" { frame_type } is not supported yet\" ) @classmethod def is_trade_day ( cls , dt : Union [ datetime . date , datetime . datetime , Arrow ]) -> bool : \"\"\"\u5224\u65ad`dt`\u662f\u5426\u4e3a\u4ea4\u6613\u65e5 Examples: >>> TimeFrame.is_trade_day(arrow.get('2020-1-1')) False Args: dt : Returns: bool \"\"\" return cls . date2int ( dt ) in cls . day_frames @classmethod def is_open_time ( cls , tm : Union [ datetime . datetime , Arrow ] = None ) -> bool : \"\"\"\u5224\u65ad`tm`\u6307\u5b9a\u7684\u65f6\u95f4\u662f\u5426\u5904\u5728\u4ea4\u6613\u65f6\u95f4\u6bb5\u3002 \u4ea4\u6613\u65f6\u95f4\u6bb5\u662f\u6307\u96c6\u5408\u7ade\u4ef7\u65f6\u95f4\u6bb5\u4e4b\u5916\u7684\u5f00\u76d8\u65f6\u95f4 Examples: >>> TimeFrame.day_frames = np.array([20200102, 20200103, 20200106, 20200107, 20200108]) >>> TimeFrame.is_open_time(arrow.get('2020-1-1 14:59').naive) False >>> TimeFrame.is_open_time(arrow.get('2020-1-3 14:59').naive) True Args: tm : [description]. Defaults to None. Returns: bool \"\"\" tm = tm or arrow . now () if not cls . is_trade_day ( tm ): return False tick = tm . hour * 60 + tm . minute return tick in cls . ticks [ FrameType . MIN1 ] @classmethod def is_opening_call_auction_time ( cls , tm : Union [ Arrow , datetime . datetime ] = None ) -> bool : \"\"\"\u5224\u65ad`tm`\u6307\u5b9a\u7684\u65f6\u95f4\u662f\u5426\u4e3a\u5f00\u76d8\u96c6\u5408\u7ade\u4ef7\u65f6\u95f4 Args: tm : [description]. Defaults to None. 
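A hedged sketch of count_frames at minute level (the doctests here only cover day-level counting). It assumes the trading calendar has been loaded, e.g. via TimeFrame.init() or, for offline experiments, TimeFrame.service_degrade(), and that FrameType is available in the namespace as in the doctests; both endpoints must already be aligned to 30-minute boundaries.

```python
import datetime

# assumes: calendar loaded (TimeFrame.init() / TimeFrame.service_degrade()),
# FrameType imported as in the doctests, endpoints aligned to MIN30 boundaries
start = datetime.datetime(2020, 1, 6, 10, 0)
end = datetime.datetime(2020, 1, 6, 11, 30)

TimeFrame.count_frames(start, end, FrameType.MIN30)   # -> 4 (the 10:00, 10:30, 11:00, 11:30 bars)
```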
Returns: bool \"\"\" if tm is None : tm = cls . now () if not cls . is_trade_day ( tm ): return False minutes = tm . hour * 60 + tm . minute return 9 * 60 + 15 < minutes <= 9 * 60 + 25 @classmethod def is_closing_call_auction_time ( cls , tm : Union [ datetime . datetime , Arrow ] = None ) -> bool : \"\"\"\u5224\u65ad`tm`\u6307\u5b9a\u7684\u65f6\u95f4\u662f\u5426\u4e3a\u6536\u76d8\u96c6\u5408\u7ade\u4ef7\u65f6\u95f4 Fixme: \u6b64\u5904\u5b9e\u73b0\u6709\u8bef\uff0c\u6536\u76d8\u96c6\u5408\u7ade\u4ef7\u65f6\u95f4\u5e94\u8be5\u8fd8\u5305\u542b\u4e0a\u5348\u6536\u76d8\u65f6\u95f4 Args: tm : [description]. Defaults to None. Returns: bool \"\"\" tm = tm or cls . now () if not cls . is_trade_day ( tm ): return False minutes = tm . hour * 60 + tm . minute return 15 * 60 - 3 <= minutes < 15 * 60 @classmethod def floor ( cls , moment : Frame , frame_type : FrameType ) -> Frame : \"\"\"\u6c42`moment`\u5728\u6307\u5b9a\u7684`frame_type`\u4e2d\u7684\u4e0b\u754c \u6bd4\u5982\uff0c\u5982\u679c`moment`\u4e3a10:37\uff0c\u5219\u5f53`frame_type`\u4e3a30\u5206\u949f\u65f6\uff0c\u5bf9\u5e94\u7684\u4e0a\u754c\u4e3a10:00 Examples: >>> # \u5982\u679cmoment\u4e3a\u65e5\u671f\uff0c\u5219\u5f53\u6210\u5df2\u6536\u76d8\u5904\u7406 >>> TimeFrame.day_frames = np.array([20050104, 20050105, 20050106, 20050107, 20050110, 20050111]) >>> TimeFrame.floor(datetime.date(2005, 1, 7), FrameType.DAY) datetime.date(2005, 1, 7) >>> # moment\u6307\u5b9a\u7684\u65f6\u95f4\u8fd8\u672a\u6536\u76d8\uff0cfloor\u5230\u4e0a\u4e00\u4e2a\u4ea4\u6613\u65e5 >>> TimeFrame.floor(datetime.datetime(2005, 1, 7, 14, 59), FrameType.DAY) datetime.date(2005, 1, 6) >>> TimeFrame.floor(datetime.date(2005, 1, 13), FrameType.WEEK) datetime.date(2005, 1, 7) >>> TimeFrame.floor(datetime.date(2005,2, 27), FrameType.MONTH) datetime.date(2005, 1, 31) >>> TimeFrame.floor(datetime.datetime(2005,1,5,14,59), FrameType.MIN30) datetime.datetime(2005, 1, 5, 14, 30) >>> TimeFrame.floor(datetime.datetime(2005, 1, 5, 14, 59), FrameType.MIN1) datetime.datetime(2005, 1, 5, 14, 59) >>> TimeFrame.floor(arrow.get('2005-1-5 14:59').naive, FrameType.MIN1) datetime.datetime(2005, 1, 5, 14, 59) Args: moment: frame_type: Returns: `moment`\u5728\u6307\u5b9a\u7684`frame_type`\u4e2d\u7684\u4e0b\u754c \"\"\" if frame_type in cls . minute_level_frames : tm , day_offset = cls . minute_frames_floor ( cls . ticks [ frame_type ], moment . hour * 60 + moment . minute ) h , m = tm // 60 , tm % 60 if cls . day_shift ( moment , 0 ) < moment . date () or day_offset == - 1 : h = 15 m = 0 new_day = cls . day_shift ( moment , day_offset ) else : new_day = moment . date () return datetime . datetime ( new_day . year , new_day . month , new_day . day , h , m ) if type ( moment ) == datetime . date : moment = datetime . datetime ( moment . year , moment . month , moment . day , 15 ) # \u5982\u679c\u662f\u4ea4\u6613\u65e5\uff0c\u4f46\u8fd8\u672a\u6536\u76d8 if ( cls . date2int ( moment ) in cls . day_frames and moment . hour * 60 + moment . minute < 900 ): moment = cls . day_shift ( moment , - 1 ) day = cls . date2int ( moment ) if frame_type == FrameType . DAY : arr = cls . day_frames elif frame_type == FrameType . WEEK : arr = cls . week_frames elif frame_type == FrameType . MONTH : arr = cls . month_frames else : # pragma: no cover raise ValueError ( f \"frame type { frame_type } not supported.\" ) floored = ext . floor ( arr , day ) return cls . int2date ( floored ) @classmethod def last_min_frame ( cls , day : Union [ str , Arrow , datetime . 
date ], frame_type : FrameType ) -> Union [ datetime . date , datetime . datetime ]: \"\"\"\u83b7\u53d6`day`\u65e5\u5468\u671f\u4e3a`frame_type`\u7684\u7ed3\u675fframe\u3002 Example: >>> TimeFrame.last_min_frame(arrow.get('2020-1-5').date(), FrameType.MIN30) datetime.datetime(2020, 1, 3, 15, 0) Args: day: frame_type: Returns: `day`\u65e5\u5468\u671f\u4e3a`frame_type`\u7684\u7ed3\u675fframe \"\"\" if isinstance ( day , str ): day = cls . date2int ( arrow . get ( day ) . date ()) elif isinstance ( day , arrow . Arrow ) or isinstance ( day , datetime . datetime ): day = cls . date2int ( day . date ()) elif isinstance ( day , datetime . date ): day = cls . date2int ( day ) else : raise TypeError ( f \" { type ( day ) } is not supported.\" ) if frame_type in cls . minute_level_frames : last_close_day = cls . day_frames [ cls . day_frames <= day ][ - 1 ] day = cls . int2date ( last_close_day ) return datetime . datetime ( day . year , day . month , day . day , hour = 15 , minute = 0 ) else : # pragma: no cover raise ValueError ( f \" { frame_type } not supported\" ) @classmethod def frame_len ( cls , frame_type : FrameType ) -> int : \"\"\"\u8fd4\u56de\u4ee5\u5206\u949f\u4e3a\u5355\u4f4d\u7684frame\u957f\u5ea6\u3002 \u5bf9\u65e5\u7ebf\u4ee5\u4e0a\u7ea7\u522b\u6ca1\u6709\u610f\u4e49\uff0c\u4f46\u4f1a\u8fd4\u56de240 Examples: >>> TimeFrame.frame_len(FrameType.MIN5) 5 Args: frame_type: Returns: \u8fd4\u56de\u4ee5\u5206\u949f\u4e3a\u5355\u4f4d\u7684frame\u957f\u5ea6\u3002 \"\"\" if frame_type == FrameType . MIN1 : return 1 elif frame_type == FrameType . MIN5 : return 5 elif frame_type == FrameType . MIN15 : return 15 elif frame_type == FrameType . MIN30 : return 30 elif frame_type == FrameType . MIN60 : return 60 else : return 240 @classmethod def first_min_frame ( cls , day : Union [ str , Arrow , Frame ], frame_type : FrameType ) -> Union [ datetime . date , datetime . datetime ]: \"\"\"\u83b7\u53d6\u6307\u5b9a\u65e5\u671f\u7c7b\u578b\u4e3a`frame_type`\u7684`frame`\u3002 Examples: >>> TimeFrame.day_frames = np.array([20191227, 20191230, 20191231, 20200102, 20200103]) >>> TimeFrame.first_min_frame('2019-12-31', FrameType.MIN1) datetime.datetime(2019, 12, 31, 9, 31) Args: day: which day? frame_type: which frame_type? Returns: `day`\u5f53\u65e5\u7684\u7b2c\u4e00\u5e27 \"\"\" day = cls . date2int ( arrow . get ( day ) . date ()) if frame_type == FrameType . MIN1 : floor_day = cls . day_frames [ cls . day_frames <= day ][ - 1 ] day = cls . int2date ( floor_day ) return datetime . datetime ( day . year , day . month , day . day , hour = 9 , minute = 31 ) elif frame_type == FrameType . MIN5 : floor_day = cls . day_frames [ cls . day_frames <= day ][ - 1 ] day = cls . int2date ( floor_day ) return datetime . datetime ( day . year , day . month , day . day , hour = 9 , minute = 35 ) elif frame_type == FrameType . MIN15 : floor_day = cls . day_frames [ cls . day_frames <= day ][ - 1 ] day = cls . int2date ( floor_day ) return datetime . datetime ( day . year , day . month , day . day , hour = 9 , minute = 45 ) elif frame_type == FrameType . MIN30 : floor_day = cls . day_frames [ cls . day_frames <= day ][ - 1 ] day = cls . int2date ( floor_day ) return datetime . datetime ( day . year , day . month , day . day , hour = 10 ) elif frame_type == FrameType . MIN60 : floor_day = cls . day_frames [ cls . day_frames <= day ][ - 1 ] day = cls . int2date ( floor_day ) return datetime . datetime ( day . year , day . month , day . 
day , hour = 10 , minute = 30 ) else : # pragma: no cover raise ValueError ( f \" { frame_type } not supported\" ) @classmethod def get_frames ( cls , start : Frame , end : Frame , frame_type : FrameType ) -> List [ int ]: \"\"\"\u53d6[start, end]\u95f4\u6240\u6709\u7c7b\u578b\u4e3aframe_type\u7684frames \u8c03\u7528\u672c\u51fd\u6570\u524d\uff0c\u8bf7\u5148\u901a\u8fc7`floor`\u6216\u8005`ceiling`\u5c06\u65f6\u95f4\u5e27\u5bf9\u9f50\u5230`frame_type`\u7684\u8fb9\u754c\u503c Example: >>> start = arrow.get('2020-1-13 10:00').naive >>> end = arrow.get('2020-1-13 13:30').naive >>> TimeFrame.day_frames = np.array([20200109, 20200110, 20200113,20200114, 20200115, 20200116]) >>> TimeFrame.get_frames(start, end, FrameType.MIN30) [202001131000, 202001131030, 202001131100, 202001131130, 202001131330] Args: start: end: frame_type: Returns: frame list \"\"\" n = cls . count_frames ( start , end , frame_type ) return cls . get_frames_by_count ( end , n , frame_type ) @classmethod def get_frames_by_count ( cls , end : Arrow , n : int , frame_type : FrameType ) -> List [ int ]: \"\"\"\u53d6\u4ee5end\u4e3a\u7ed3\u675f\u70b9,\u5468\u671f\u4e3aframe_type\u7684n\u4e2aframe \u8c03\u7528\u524d\u8bf7\u5c06`end`\u5bf9\u9f50\u5230`frame_type`\u7684\u8fb9\u754c Examples: >>> end = arrow.get('2020-1-6 14:30').naive >>> TimeFrame.day_frames = np.array([20200102, 20200103,20200106, 20200107, 20200108, 20200109]) >>> TimeFrame.get_frames_by_count(end, 2, FrameType.MIN30) [202001061400, 202001061430] Args: end: n: frame_type: Returns: frame list \"\"\" if frame_type == FrameType . DAY : end = cls . date2int ( end ) pos = np . searchsorted ( cls . day_frames , end , side = \"right\" ) return cls . day_frames [ max ( 0 , pos - n ) : pos ] . tolist () elif frame_type == FrameType . WEEK : end = cls . date2int ( end ) pos = np . searchsorted ( cls . week_frames , end , side = \"right\" ) return cls . week_frames [ max ( 0 , pos - n ) : pos ] . tolist () elif frame_type == FrameType . MONTH : end = cls . date2int ( end ) pos = np . searchsorted ( cls . month_frames , end , side = \"right\" ) return cls . month_frames [ max ( 0 , pos - n ) : pos ] . tolist () elif frame_type in { FrameType . MIN1 , FrameType . MIN5 , FrameType . MIN15 , FrameType . MIN30 , FrameType . MIN60 , }: n_days = n // len ( cls . ticks [ frame_type ]) + 2 ticks = cls . ticks [ frame_type ] * n_days days = cls . get_frames_by_count ( end , n_days , FrameType . DAY ) days = np . repeat ( days , len ( cls . ticks [ frame_type ])) ticks = [ day . item () * 10000 + int ( tm / 60 ) * 100 + tm % 60 for day , tm in zip ( days , ticks ) ] # list index is much faster than ext.index_sorted when the arr is small pos = ticks . index ( cls . 
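# --- Illustrative aside (not omicron code) ----------------------------------
# The list comprehension above packs a trading day (YYYYMMDD) together with a
# bar's minute-of-day tick into one integer frame of the form YYYYMMDDHHmm;
# for example, tick 870 means 14:30.
def encode_minute_frame(day: int, tm: int) -> int:
    return day * 10000 + (tm // 60) * 100 + tm % 60

assert encode_minute_frame(20200106, 870) == 202001061430  # 2020-01-06 14:30
assert encode_minute_frame(20200106, 900) == 202001061500  # 2020-01-06 15:00
# --- end of aside ------------------------------------------------------------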
time2int ( end )) + 1 return ticks [ max ( 0 , pos - n ) : pos ] else : # pragma: no cover raise ValueError ( f \" { frame_type } not support yet\" ) @classmethod def ceiling ( cls , moment : Frame , frame_type : FrameType ) -> Frame : \"\"\"\u6c42`moment`\u6240\u5728\u7c7b\u578b\u4e3a`frame_type`\u5468\u671f\u7684\u4e0a\u754c \u6bd4\u5982`moment`\u4e3a14:59\u5206\uff0c\u5982\u679c`frame_type`\u4e3a30\u5206\u949f\uff0c\u5219\u5b83\u7684\u4e0a\u754c\u5e94\u8be5\u4e3a15:00 Example: >>> TimeFrame.day_frames = [20050104, 20050105, 20050106, 20050107] >>> TimeFrame.ceiling(datetime.date(2005, 1, 7), FrameType.DAY) datetime.date(2005, 1, 7) >>> TimeFrame.week_frames = [20050107, 20050114, 20050121, 20050128] >>> TimeFrame.ceiling(datetime.date(2005, 1, 4), FrameType.WEEK) datetime.date(2005, 1, 7) >>> TimeFrame.ceiling(datetime.date(2005,1,7), FrameType.WEEK) datetime.date(2005, 1, 7) >>> TimeFrame.month_frames = [20050131, 20050228] >>> TimeFrame.ceiling(datetime.date(2005,1 ,1), FrameType.MONTH) datetime.date(2005, 1, 31) >>> TimeFrame.ceiling(datetime.datetime(2005,1,5,14,59), FrameType.MIN30) datetime.datetime(2005, 1, 5, 15, 0) >>> TimeFrame.ceiling(datetime.datetime(2005, 1, 5, 14, 59), FrameType.MIN1) datetime.datetime(2005, 1, 5, 14, 59) >>> TimeFrame.ceiling(arrow.get('2005-1-5 14:59').naive, FrameType.MIN1) datetime.datetime(2005, 1, 5, 14, 59) Args: moment (datetime.datetime): [description] frame_type (FrameType): [description] Returns: `moment`\u6240\u5728\u7c7b\u578b\u4e3a`frame_type`\u5468\u671f\u7684\u4e0a\u754c \"\"\" if frame_type in cls . day_level_frames and type ( moment ) == datetime . datetime : moment = moment . date () floor = cls . floor ( moment , frame_type ) if floor == moment : return moment elif floor > moment : return floor else : return cls . shift ( floor , 1 , frame_type ) @classmethod def combine_time ( cls , date : datetime . date , hour : int , minute : int = 0 , second : int = 0 , microsecond : int = 0 , ) -> datetime . datetime : \"\"\"\u7528`date`\u6307\u5b9a\u7684\u65e5\u671f\u4e0e`hour`, `minute`, `second`\u7b49\u53c2\u6570\u4e00\u8d77\u5408\u6210\u65b0\u7684\u65f6\u95f4 Examples: >>> TimeFrame.combine_time(datetime.date(2020, 1, 1), 14, 30) datetime.datetime(2020, 1, 1, 14, 30) Args: date : [description] hour : [description] minute : [description]. Defaults to 0. second : [description]. Defaults to 0. microsecond : [description]. Defaults to 0. Returns: \u5408\u6210\u540e\u7684\u65f6\u95f4 \"\"\" return datetime . datetime ( date . year , date . month , date . day , hour , minute , second , microsecond ) @classmethod def replace_date ( cls , dtm : datetime . datetime , dt : datetime . date ) -> datetime . datetime : \"\"\"\u5c06`dtm`\u53d8\u91cf\u7684\u65e5\u671f\u66f4\u6362\u4e3a`dt`\u6307\u5b9a\u7684\u65e5\u671f Example: >>> TimeFrame.replace_date(arrow.get('2020-1-1 13:49').datetime, datetime.date(2019, 1,1)) datetime.datetime(2019, 1, 1, 13, 49) Args: dtm (datetime.datetime): [description] dt (datetime.date): [description] Returns: \u53d8\u6362\u540e\u7684\u65f6\u95f4 \"\"\" return datetime . datetime ( dt . year , dt . month , dt . day , dtm . hour , dtm . minute , dtm . second , dtm . microsecond ) @classmethod def resample_frames ( cls , trade_days : Iterable [ datetime . 
date ], frame_type : FrameType ) -> List [ int ]: \"\"\"\u5c06\u4ece\u884c\u60c5\u670d\u52a1\u5668\u83b7\u53d6\u7684\u4ea4\u6613\u65e5\u5386\u91cd\u91c7\u6837\uff0c\u751f\u6210\u5468\u5e27\u548c\u6708\u7ebf\u5e27 Args: trade_days (Iterable): [description] frame_type (FrameType): [description] Returns: List[int]: \u91cd\u91c7\u6837\u540e\u7684\u65e5\u671f\u5217\u8868\uff0c\u65e5\u671f\u7528\u6574\u6570\u8868\u793a \"\"\" if frame_type == FrameType . WEEK : weeks = [] last = trade_days [ 0 ] for cur in trade_days : if cur . weekday () < last . weekday () or ( cur - last ) . days >= 7 : weeks . append ( last ) last = cur if weeks [ - 1 ] < last : weeks . append ( last ) return weeks elif frame_type == FrameType . MONTH : months = [] last = trade_days [ 0 ] for cur in trade_days : if cur . day < last . day : months . append ( last ) last = cur months . append ( last ) return months elif frame_type == FrameType . QUARTER : quarters = [] last = trade_days [ 0 ] for cur in trade_days : if last . month % 3 == 0 : if cur . month > last . month or cur . year > last . year : quarters . append ( last ) last = cur quarters . append ( last ) return quarters elif frame_type == FrameType . YEAR : years = [] last = trade_days [ 0 ] for cur in trade_days : if cur . year > last . year : years . append ( last ) last = cur years . append ( last ) return years else : # pragma: no cover raise ValueError ( f \"Unsupported FrameType: { frame_type } \" ) @classmethod def minute_frames_floor ( cls , ticks , moment ) -> Tuple [ int , int ]: \"\"\" \u5bf9\u4e8e\u5206\u949f\u7ea7\u7684frame,\u8fd4\u56de\u5b83\u4eec\u4e0eframe\u523b\u5ea6\u5411\u4e0b\u5bf9\u9f50\u540e\u7684frame\u53ca\u65e5\u671f\u8fdb\u4f4d\u3002\u5982\u679c\u9700\u8981\u5bf9\u9f50\u5230\u4e0a\u4e00\u4e2a\u4ea4\u6613 \u65e5\uff0c\u5219\u8fdb\u4f4d\u4e3a-1\uff0c\u5426\u5219\u4e3a0. Examples: >>> ticks = [600, 630, 660, 690, 810, 840, 870, 900] >>> TimeFrame.minute_frames_floor(ticks, 545) (900, -1) >>> TimeFrame.minute_frames_floor(ticks, 600) (600, 0) >>> TimeFrame.minute_frames_floor(ticks, 605) (600, 0) >>> TimeFrame.minute_frames_floor(ticks, 899) (870, 0) >>> TimeFrame.minute_frames_floor(ticks, 900) (900, 0) >>> TimeFrame.minute_frames_floor(ticks, 905) (900, 0) Args: ticks (np.array or list): frames\u523b\u5ea6 moment (int): \u6574\u6570\u8868\u793a\u7684\u5206\u949f\u6570\uff0c\u6bd4\u5982900\u8868\u793a15\uff1a00 Returns: tuple, the first is the new moment, the second is carry-on \"\"\" if moment < ticks [ 0 ]: return ticks [ - 1 ], - 1 # \u2019right' \u76f8\u5f53\u4e8e ticks <= m index = np . searchsorted ( ticks , moment , side = \"right\" ) return ticks [ index - 1 ], 0 @classmethod async def save_calendar ( cls , trade_days ): # avoid circular import from omicron import cache for ft in [ FrameType . WEEK , FrameType . MONTH , FrameType . QUARTER , FrameType . YEAR ]: days = cls . resample_frames ( trade_days , ft ) frames = [ cls . date2int ( x ) for x in days ] key = f \"calendar: { ft . value } \" pl = cache . security . pipeline () pl . delete ( key ) pl . rpush ( key , * frames ) await pl . execute () frames = [ cls . date2int ( x ) for x in trade_days ] key = f \"calendar: { FrameType . DAY . value } \" pl = cache . security . pipeline () pl . delete ( key ) pl . rpush ( key , * frames ) await pl . execute () @classmethod async def remove_calendar ( cls ): # avoid circular import from omicron import cache for ft in cls . day_level_frames : key = f \"calendar: { ft . value } \" await cache . security . 
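# --- Illustrative aside (not omicron code) ----------------------------------
# The WEEK branch of resample_frames above closes a week whenever the next
# trading day has a smaller weekday (the calendar wrapped into a new week) or
# is 7+ calendar days away (a long holiday). Standalone demonstration:
import datetime

trade_days = [
    datetime.date(2020, 1, 2),   # Thu
    datetime.date(2020, 1, 3),   # Fri  -> closes the first week
    datetime.date(2020, 1, 6),   # Mon
    datetime.date(2020, 1, 7),   # Tue
    datetime.date(2020, 1, 8),   # Wed  -> last day given, closes the second week
]

weeks, last = [], trade_days[0]
for cur in trade_days:
    if cur.weekday() < last.weekday() or (cur - last).days >= 7:
        weeks.append(last)
    last = cur
if weeks[-1] < last:
    weeks.append(last)

assert weeks == [datetime.date(2020, 1, 3), datetime.date(2020, 1, 8)]
# --- end of aside ------------------------------------------------------------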
delete ( key ) @classmethod def is_bar_closed ( cls , frame : Frame , ft : FrameType ) -> bool : \"\"\"\u5224\u65ad`frame`\u6240\u4ee3\u8868\u7684bar\u662f\u5426\u5df2\u7ecf\u6536\u76d8\uff08\u7ed3\u675f\uff09 \u5982\u679c\u662f\u65e5\u7ebf\uff0cframe\u4e0d\u4e3a\u5f53\u5929\uff0c\u5219\u8ba4\u4e3a\u5df2\u6536\u76d8\uff1b\u6216\u8005\u5f53\u524d\u65f6\u95f4\u5728\u6536\u76d8\u65f6\u95f4\u4e4b\u540e\uff0c\u4e5f\u8ba4\u4e3a\u5df2\u6536\u76d8\u3002 \u5982\u679c\u662f\u5176\u5b83\u5468\u671f\uff0c\u5219\u53ea\u6709\u5f53frame\u6b63\u597d\u5728\u8fb9\u754c\u4e0a\uff0c\u624d\u8ba4\u4e3a\u662f\u5df2\u6536\u76d8\u3002\u8fd9\u91cc\u6709\u4e00\u4e2a\u5047\u8bbe\uff1a\u6211\u4eec\u4e0d\u4f1a\u5728\u5176\u5b83\u5468\u671f\u4e0a\uff0c\u5224\u65ad\u672a\u6765\u7684\u67d0\u4e2aframe\u662f\u5426\u5df2\u7ecf\u6536\u76d8\u3002 Args: frame : bar\u6240\u5904\u7684\u65f6\u95f4\uff0c\u5fc5\u987b\u5c0f\u4e8e\u5f53\u524d\u65f6\u95f4 ft: bar\u6240\u4ee3\u8868\u7684\u5e27\u7c7b\u578b Returns: bool: \u662f\u5426\u5df2\u7ecf\u6536\u76d8 \"\"\" floor = cls . floor ( frame , ft ) now = arrow . now () if ft == FrameType . DAY : return floor < now . date () or now . hour >= 15 else : return floor == frame @classmethod def get_frame_scope ( cls , frame : Frame , ft : FrameType ) -> Tuple [ Frame , Frame ]: # todo: \u51fd\u6570\u7684\u901a\u7528\u6027\u4e0d\u8db3\uff0c\u4f3c\u4e4e\u5e94\u8be5\u653e\u5728\u5177\u4f53\u7684\u4e1a\u52a1\u7c7b\u4e2d\u3002\u5982\u679c\u662f\u901a\u7528\u578b\u7684\u51fd\u6570\uff0c\u53c2\u6570\u4e0d\u5e94\u8be5\u5c40\u9650\u4e8e\u5468\u548c\u6708\u3002 \"\"\"\u5bf9\u4e8e\u7ed9\u5b9a\u7684\u65f6\u95f4\uff0c\u53d6\u6240\u5728\u5468\u7684\u7b2c\u4e00\u5929\u548c\u6700\u540e\u4e00\u5929\uff0c\u6240\u5728\u6708\u7684\u7b2c\u4e00\u5929\u548c\u6700\u540e\u4e00\u5929 Args: frame : \u6307\u5b9a\u7684\u65e5\u671f\uff0cdate\u5bf9\u8c61 ft: \u5e27\u7c7b\u578b\uff0c\u652f\u6301WEEK\u548cMONTH Returns: Tuple[Frame, Frame]: \u5468\u6216\u8005\u6708\u7684\u9996\u672b\u65e5\u671f\uff08date\u5bf9\u8c61\uff09 \"\"\" if frame is None : raise ValueError ( \"frame cannot be None\" ) if ft not in ( FrameType . WEEK , FrameType . MONTH ): raise ValueError ( f \"FrameType only supports WEEK and MONTH: { ft } \" ) if isinstance ( frame , datetime . datetime ): frame = frame . date () if frame < CALENDAR_START : raise ValueError ( f \"cannot be earlier than { CALENDAR_START } : { frame } \" ) # datetime.date(2021, 10, 8)\uff0c\u8fd9\u662f\u4e2a\u7279\u6b8a\u7684\u65e5\u671f if ft == FrameType . WEEK : if frame < datetime . date ( 2005 , 1 , 10 ): return datetime . date ( 2005 , 1 , 4 ), datetime . date ( 2005 , 1 , 7 ) if not cls . is_trade_day ( frame ): # \u975e\u4ea4\u6613\u65e5\u7684\u60c5\u51b5\uff0c\u76f4\u63a5\u56de\u9000\u4e00\u5929 week_day = cls . day_shift ( frame , 0 ) else : week_day = frame w1 = TimeFrame . floor ( week_day , FrameType . WEEK ) if w1 == week_day : # \u672c\u5468\u7684\u6700\u540e\u4e00\u4e2a\u4ea4\u6613\u65e5 week_end = w1 else : week_end = TimeFrame . week_shift ( week_day , 1 ) w0 = TimeFrame . week_shift ( week_end , - 1 ) week_start = TimeFrame . day_shift ( w0 , 1 ) return week_start , week_end if ft == FrameType . MONTH : if frame <= datetime . date ( 2005 , 1 , 31 ): return datetime . date ( 2005 , 1 , 4 ), datetime . date ( 2005 , 1 , 31 ) month_start = frame . replace ( day = 1 ) if not cls . is_trade_day ( month_start ): # \u975e\u4ea4\u6613\u65e5\u7684\u60c5\u51b5\uff0c\u76f4\u63a5\u52a01 month_start = cls . day_shift ( month_start , 1 ) month_end = TimeFrame . 
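# --- Usage sketch (not omicron code; assumes `await TimeFrame.init()` has
# loaded a calendar containing 2022-03-01, and that TimeFrame / FrameType are
# imported from omicron's public API) -----------------------------------------
# For non-day frame types, is_bar_closed simply checks whether the timestamp
# sits exactly on a frame boundary:
import datetime

bar_time = datetime.datetime(2022, 3, 1, 10, 30)
TimeFrame.is_bar_closed(bar_time, FrameType.MIN30)                     # True: on a 30m boundary
TimeFrame.is_bar_closed(bar_time.replace(minute=40), FrameType.MIN30)  # False: mid-bar
# --- end of sketch ------------------------------------------------------------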
month_shift ( month_start , 1 ) return month_start , month_end @classmethod def get_previous_trade_day ( cls , now : datetime . date ): \"\"\"\u83b7\u53d6\u4e0a\u4e00\u4e2a\u4ea4\u6613\u65e5 \u5982\u679c\u5f53\u5929\u662f\u5468\u516d\u6216\u8005\u5468\u65e5\uff0c\u8fd4\u56de\u5468\u4e94\uff08\u4ea4\u6613\u65e5\uff09\uff0c\u5982\u679c\u5f53\u5929\u662f\u5468\u4e00\uff0c\u8fd4\u56de\u5468\u4e94\uff0c\u5982\u679c\u5f53\u5929\u662f\u5468\u4e94\uff0c\u8fd4\u56de\u5468\u56db Args: now : \u6307\u5b9a\u7684\u65e5\u671f\uff0cdate\u5bf9\u8c61 Returns: datetime.date: \u4e0a\u4e00\u4e2a\u4ea4\u6613\u65e5 \"\"\" if now == datetime . date ( 2005 , 1 , 4 ): return now if TimeFrame . is_trade_day ( now ): pre_trade_day = TimeFrame . day_shift ( now , - 1 ) else : pre_trade_day = TimeFrame . day_shift ( now , 0 ) return pre_trade_day ceiling ( moment , frame_type ) classmethod \u00b6 \u6c42 moment \u6240\u5728\u7c7b\u578b\u4e3a frame_type \u5468\u671f\u7684\u4e0a\u754c \u6bd4\u5982 moment \u4e3a14:59\u5206\uff0c\u5982\u679c frame_type \u4e3a30\u5206\u949f\uff0c\u5219\u5b83\u7684\u4e0a\u754c\u5e94\u8be5\u4e3a15:00 Examples: >>> TimeFrame . day_frames = [ 20050104 , 20050105 , 20050106 , 20050107 ] >>> TimeFrame . ceiling ( datetime . date ( 2005 , 1 , 7 ), FrameType . DAY ) datetime . date ( 2005 , 1 , 7 ) >>> TimeFrame . week_frames = [ 20050107 , 20050114 , 20050121 , 20050128 ] >>> TimeFrame . ceiling ( datetime . date ( 2005 , 1 , 4 ), FrameType . WEEK ) datetime . date ( 2005 , 1 , 7 ) >>> TimeFrame . ceiling ( datetime . date ( 2005 , 1 , 7 ), FrameType . WEEK ) datetime . date ( 2005 , 1 , 7 ) >>> TimeFrame . month_frames = [ 20050131 , 20050228 ] >>> TimeFrame . ceiling ( datetime . date ( 2005 , 1 , 1 ), FrameType . MONTH ) datetime . date ( 2005 , 1 , 31 ) >>> TimeFrame . ceiling ( datetime . datetime ( 2005 , 1 , 5 , 14 , 59 ), FrameType . MIN30 ) datetime . datetime ( 2005 , 1 , 5 , 15 , 0 ) >>> TimeFrame . ceiling ( datetime . datetime ( 2005 , 1 , 5 , 14 , 59 ), FrameType . MIN1 ) datetime . datetime ( 2005 , 1 , 5 , 14 , 59 ) >>> TimeFrame . ceiling ( arrow . get ( '2005-1-5 14:59' ) . naive , FrameType . MIN1 ) datetime . 
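# --- Usage sketch for get_previous_trade_day (not omicron code; assumes an
# initialized calendar in which 2020-01-02, 2020-01-03 and 2020-01-06 are
# trading days) ----------------------------------------------------------------
import datetime

TimeFrame.get_previous_trade_day(datetime.date(2020, 1, 6))  # Mon -> Fri 2020-01-03
TimeFrame.get_previous_trade_day(datetime.date(2020, 1, 5))  # Sun -> Fri 2020-01-03
TimeFrame.get_previous_trade_day(datetime.date(2020, 1, 3))  # Fri -> Thu 2020-01-02
# --- end of sketch ------------------------------------------------------------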
datetime ( 2005 , 1 , 5 , 14 , 59 ) Parameters: Name Type Description Default moment datetime.datetime [description] required frame_type FrameType [description] required Returns: Type Description Frame moment \u6240\u5728\u7c7b\u578b\u4e3a frame_type \u5468\u671f\u7684\u4e0a\u754c Source code in omicron/models/timeframe.py @classmethod def ceiling ( cls , moment : Frame , frame_type : FrameType ) -> Frame : \"\"\"\u6c42`moment`\u6240\u5728\u7c7b\u578b\u4e3a`frame_type`\u5468\u671f\u7684\u4e0a\u754c \u6bd4\u5982`moment`\u4e3a14:59\u5206\uff0c\u5982\u679c`frame_type`\u4e3a30\u5206\u949f\uff0c\u5219\u5b83\u7684\u4e0a\u754c\u5e94\u8be5\u4e3a15:00 Example: >>> TimeFrame.day_frames = [20050104, 20050105, 20050106, 20050107] >>> TimeFrame.ceiling(datetime.date(2005, 1, 7), FrameType.DAY) datetime.date(2005, 1, 7) >>> TimeFrame.week_frames = [20050107, 20050114, 20050121, 20050128] >>> TimeFrame.ceiling(datetime.date(2005, 1, 4), FrameType.WEEK) datetime.date(2005, 1, 7) >>> TimeFrame.ceiling(datetime.date(2005,1,7), FrameType.WEEK) datetime.date(2005, 1, 7) >>> TimeFrame.month_frames = [20050131, 20050228] >>> TimeFrame.ceiling(datetime.date(2005,1 ,1), FrameType.MONTH) datetime.date(2005, 1, 31) >>> TimeFrame.ceiling(datetime.datetime(2005,1,5,14,59), FrameType.MIN30) datetime.datetime(2005, 1, 5, 15, 0) >>> TimeFrame.ceiling(datetime.datetime(2005, 1, 5, 14, 59), FrameType.MIN1) datetime.datetime(2005, 1, 5, 14, 59) >>> TimeFrame.ceiling(arrow.get('2005-1-5 14:59').naive, FrameType.MIN1) datetime.datetime(2005, 1, 5, 14, 59) Args: moment (datetime.datetime): [description] frame_type (FrameType): [description] Returns: `moment`\u6240\u5728\u7c7b\u578b\u4e3a`frame_type`\u5468\u671f\u7684\u4e0a\u754c \"\"\" if frame_type in cls . day_level_frames and type ( moment ) == datetime . datetime : moment = moment . date () floor = cls . floor ( moment , frame_type ) if floor == moment : return moment elif floor > moment : return floor else : return cls . shift ( floor , 1 , frame_type ) combine_time ( date , hour , minute = 0 , second = 0 , microsecond = 0 ) classmethod \u00b6 \u7528 date \u6307\u5b9a\u7684\u65e5\u671f\u4e0e hour , minute , second \u7b49\u53c2\u6570\u4e00\u8d77\u5408\u6210\u65b0\u7684\u65f6\u95f4 Examples: >>> TimeFrame . combine_time ( datetime . date ( 2020 , 1 , 1 ), 14 , 30 ) datetime . datetime ( 2020 , 1 , 1 , 14 , 30 ) Parameters: Name Type Description Default date [description] required hour [description] required minute [description]. Defaults to 0. 0 second [description]. Defaults to 0. 0 microsecond [description]. Defaults to 0. 0 Returns: Type Description datetime.datetime \u5408\u6210\u540e\u7684\u65f6\u95f4 Source code in omicron/models/timeframe.py @classmethod def combine_time ( cls , date : datetime . date , hour : int , minute : int = 0 , second : int = 0 , microsecond : int = 0 , ) -> datetime . datetime : \"\"\"\u7528`date`\u6307\u5b9a\u7684\u65e5\u671f\u4e0e`hour`, `minute`, `second`\u7b49\u53c2\u6570\u4e00\u8d77\u5408\u6210\u65b0\u7684\u65f6\u95f4 Examples: >>> TimeFrame.combine_time(datetime.date(2020, 1, 1), 14, 30) datetime.datetime(2020, 1, 1, 14, 30) Args: date : [description] hour : [description] minute : [description]. Defaults to 0. second : [description]. Defaults to 0. microsecond : [description]. Defaults to 0. Returns: \u5408\u6210\u540e\u7684\u65f6\u95f4 \"\"\" return datetime . datetime ( date . year , date . month , date . 
day , hour , minute , second , microsecond ) count_day_frames ( start , end ) classmethod \u00b6 calc trade days between start and end in close-to-close way. if start == end, this will returns 1. Both start/end will be aligned to open trade day before calculation. Examples: >>> start = datetime . date ( 2019 , 12 , 21 ) >>> end = datetime . date ( 2019 , 12 , 21 ) >>> TimeFrame . day_frames = [ 20191219 , 20191220 , 20191223 , 20191224 , 20191225 ] >>> TimeFrame . count_day_frames ( start , end ) 1 >>> # non-trade days are removed >>> TimeFrame . day_frames = [ 20200121 , 20200122 , 20200123 , 20200203 , 20200204 , 20200205 ] >>> start = datetime . date ( 2020 , 1 , 23 ) >>> end = datetime . date ( 2020 , 2 , 4 ) >>> TimeFrame . count_day_frames ( start , end ) 3 Parameters: Name Type Description Default start Union[datetime.date, Arrow] required end Union[datetime.date, Arrow] required Returns: Type Description int count of days Source code in omicron/models/timeframe.py @classmethod def count_day_frames ( cls , start : Union [ datetime . date , Arrow ], end : Union [ datetime . date , Arrow ] ) -> int : \"\"\"calc trade days between start and end in close-to-close way. if start == end, this will returns 1. Both start/end will be aligned to open trade day before calculation. Examples: >>> start = datetime.date(2019, 12, 21) >>> end = datetime.date(2019, 12, 21) >>> TimeFrame.day_frames = [20191219, 20191220, 20191223, 20191224, 20191225] >>> TimeFrame.count_day_frames(start, end) 1 >>> # non-trade days are removed >>> TimeFrame.day_frames = [20200121, 20200122, 20200123, 20200203, 20200204, 20200205] >>> start = datetime.date(2020, 1, 23) >>> end = datetime.date(2020, 2, 4) >>> TimeFrame.count_day_frames(start, end) 3 args: start: end: returns: count of days \"\"\" start = cls . date2int ( start ) end = cls . date2int ( end ) return int ( ext . count_between ( cls . day_frames , start , end )) count_frames ( start , end , frame_type ) classmethod \u00b6 \u8ba1\u7b97start\u4e0eend\u4e4b\u95f4\u6709\u591a\u5c11\u4e2a\u5468\u671f\u4e3aframe_type\u7684frames See also: count_day_frames count_week_frames count_month_frames Parameters: Name Type Description Default start start frame required end end frame required frame_type the type of frame required Exceptions: Type Description ValueError \u5982\u679cframe_type\u4e0d\u652f\u6301\uff0c\u5219\u4f1a\u629b\u51fa\u6b64\u5f02\u5e38\u3002 Returns: Type Description int \u4ecestart\u5230end\u7684\u5e27\u6570 Source code in omicron/models/timeframe.py @classmethod def count_frames ( cls , start : Union [ datetime . date , datetime . datetime , Arrow ], end : Union [ datetime . date , datetime . datetime , Arrow ], frame_type , ) -> int : \"\"\"\u8ba1\u7b97start\u4e0eend\u4e4b\u95f4\u6709\u591a\u5c11\u4e2a\u5468\u671f\u4e3aframe_type\u7684frames See also: - [count_day_frames][omicron.models.timeframe.TimeFrame.count_day_frames] - [count_week_frames][omicron.models.timeframe.TimeFrame.count_week_frames] - [count_month_frames][omicron.models.timeframe.TimeFrame.count_month_frames] Args: start : start frame end : end frame frame_type : the type of frame Raises: ValueError: \u5982\u679cframe_type\u4e0d\u652f\u6301\uff0c\u5219\u4f1a\u629b\u51fa\u6b64\u5f02\u5e38\u3002 Returns: \u4ecestart\u5230end\u7684\u5e27\u6570 \"\"\" if frame_type == FrameType . DAY : return cls . count_day_frames ( start , end ) elif frame_type == FrameType . WEEK : return cls . count_week_frames ( start , end ) elif frame_type == FrameType . MONTH : return cls . 
count_month_frames ( start , end ) elif frame_type == FrameType . QUARTER : return cls . count_quarter_frames ( start , end ) elif frame_type == FrameType . YEAR : return cls . count_year_frames ( start , end ) elif frame_type in [ FrameType . MIN1 , FrameType . MIN5 , FrameType . MIN15 , FrameType . MIN30 , FrameType . MIN60 , ]: tm_start = start . hour * 60 + start . minute tm_end = end . hour * 60 + end . minute days = cls . count_day_frames ( start . date (), end . date ()) - 1 tm_start_pos = cls . ticks [ frame_type ] . index ( tm_start ) tm_end_pos = cls . ticks [ frame_type ] . index ( tm_end ) min_bars = tm_end_pos - tm_start_pos + 1 return days * len ( cls . ticks [ frame_type ]) + min_bars else : # pragma: no cover raise ValueError ( f \" { frame_type } is not supported yet\" ) count_month_frames ( start , end ) classmethod \u00b6 calc trade months between start and end date in close-to-close way Both start and end will be aligned to open trade day before calculation. After that, if start == end, this will returns 1. For examples, please refer to count_day_frames Parameters: Name Type Description Default start datetime.date required end datetime.date required Returns: Type Description int months between start and end Source code in omicron/models/timeframe.py @classmethod def count_month_frames ( cls , start : datetime . date , end : datetime . date ) -> int : \"\"\"calc trade months between start and end date in close-to-close way Both start and end will be aligned to open trade day before calculation. After that, if start == end, this will returns 1. For examples, please refer to [count_day_frames][omicron.models.timeframe.TimeFrame.count_day_frames] Args: start: end: Returns: months between start and end \"\"\" start = cls . date2int ( start ) end = cls . date2int ( end ) return int ( ext . count_between ( cls . month_frames , start , end )) count_quarter_frames ( start , end ) classmethod \u00b6 calc trade quarters between start and end date in close-to-close way Both start and end will be aligned to open trade day before calculation. After that, if start == end, this will returns 1. For examples, please refer to count_day_frames Parameters: Name Type Description Default start datetime.date [description] required end datetime.date [description] required Returns: Type Description int quarters between start and end Source code in omicron/models/timeframe.py @classmethod def count_quarter_frames ( cls , start : datetime . date , end : datetime . date ) -> int : \"\"\"calc trade quarters between start and end date in close-to-close way Both start and end will be aligned to open trade day before calculation. After that, if start == end, this will returns 1. For examples, please refer to [count_day_frames][omicron.models.timeframe.TimeFrame.count_day_frames] Args: start (datetime.date): [description] end (datetime.date): [description] Returns: quarters between start and end \"\"\" start = cls . date2int ( start ) end = cls . date2int ( end ) return int ( ext . count_between ( cls . quarter_frames , start , end )) count_week_frames ( start , end ) classmethod \u00b6 calc trade weeks between start and end in close-to-close way. Both start and end will be aligned to open trade day before calculation. 
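(Aside: all of the count_*_frames helpers delegate the actual counting to ext.count_between over a sorted integer calendar. A minimal sketch of such a close-to-close count, with hypothetical names and no bounds checking, reproduces the documented count_day_frames example:)

import numpy as np

def count_between(frames, start, end):
    # Align each end down to the last frame <= the given date, then count inclusively.
    i = np.searchsorted(frames, start, side='right') - 1
    j = np.searchsorted(frames, end, side='right') - 1
    return j - i + 1

day_frames = np.array([20200121, 20200122, 20200123, 20200203, 20200204, 20200205])
assert count_between(day_frames, 20200123, 20200204) == 3
assert count_between(day_frames, 20200124, 20200124) == 1  # non-trade days are floored first
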
After that, if start == end, this will returns 1 for examples, please refer to count_day_frames Parameters: Name Type Description Default start datetime.date required end datetime.date required Returns: Type Description int count of weeks Source code in omicron/models/timeframe.py @classmethod def count_week_frames ( cls , start : datetime . date , end : datetime . date ) -> int : \"\"\" calc trade weeks between start and end in close-to-close way. Both start and end will be aligned to open trade day before calculation. After that, if start == end, this will returns 1 for examples, please refer to [count_day_frames][omicron.models.timeframe.TimeFrame.count_day_frames] args: start: end: returns: count of weeks \"\"\" start = cls . date2int ( start ) end = cls . date2int ( end ) return int ( ext . count_between ( cls . week_frames , start , end )) count_year_frames ( start , end ) classmethod \u00b6 calc trade years between start and end date in close-to-close way Both start and end will be aligned to open trade day before calculation. After that, if start == end, this will returns 1. For examples, please refer to count_day_frames Parameters: Name Type Description Default start datetime.date [description] required end datetime.date [description] required Returns: Type Description int years between start and end Source code in omicron/models/timeframe.py @classmethod def count_year_frames ( cls , start : datetime . date , end : datetime . date ) -> int : \"\"\"calc trade years between start and end date in close-to-close way Both start and end will be aligned to open trade day before calculation. After that, if start == end, this will returns 1. For examples, please refer to [count_day_frames][omicron.models.timeframe.TimeFrame.count_day_frames] Args: start (datetime.date): [description] end (datetime.date): [description] Returns: years between start and end \"\"\" start = cls . date2int ( start ) end = cls . date2int ( end ) return int ( ext . count_between ( cls . year_frames , start , end )) date2int ( d ) classmethod \u00b6 \u5c06\u65e5\u671f\u8f6c\u6362\u4e3a\u6574\u6570\u8868\u793a \u5728zillionare\u4e2d\uff0c\u5982\u679c\u8981\u5bf9\u65f6\u95f4\u548c\u65e5\u671f\u8fdb\u884c\u6301\u4e45\u5316\u64cd\u4f5c\uff0c\u6211\u4eec\u4e00\u822c\u5c06\u5176\u8f6c\u6362\u4e3aint\u7c7b\u578b Examples: >>> TimeFrame . date2int ( datetime . date ( 2020 , 5 , 1 )) 20200501 Parameters: Name Type Description Default d Union[datetime.datetime, datetime.date, Arrow] date required Returns: Type Description int \u65e5\u671f\u7684\u6574\u6570\u8868\u793a\uff0c\u6bd4\u598220220211 Source code in omicron/models/timeframe.py @classmethod def date2int ( cls , d : Union [ datetime . datetime , datetime . date , Arrow ]) -> int : \"\"\"\u5c06\u65e5\u671f\u8f6c\u6362\u4e3a\u6574\u6570\u8868\u793a \u5728zillionare\u4e2d\uff0c\u5982\u679c\u8981\u5bf9\u65f6\u95f4\u548c\u65e5\u671f\u8fdb\u884c\u6301\u4e45\u5316\u64cd\u4f5c\uff0c\u6211\u4eec\u4e00\u822c\u5c06\u5176\u8f6c\u6362\u4e3aint\u7c7b\u578b Examples: >>> TimeFrame.date2int(datetime.date(2020,5,1)) 20200501 Args: d: date Returns: \u65e5\u671f\u7684\u6574\u6570\u8868\u793a\uff0c\u6bd4\u598220220211 \"\"\" return int ( f \" { d . year : 04 }{ d . month : 02 }{ d . 
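# --- Illustrative aside (only the TimeFrame calls are omicron API; assumes
# `from omicron.models.timeframe import TimeFrame`) ----------------------------
# Dates are persisted as YYYYMMDD integers and minute frames as YYYYMMDDHHmm;
# int2date / int2time simply parse those digits back and need no calendar.
import datetime

d = datetime.date(2022, 2, 11)
assert int(f'{d.year:04}{d.month:02}{d.day:02}') == 20220211  # what date2int does
assert TimeFrame.int2date(20220211) == d
assert TimeFrame.int2time(202202111030) == datetime.datetime(2022, 2, 11, 10, 30)
# --- end of aside ------------------------------------------------------------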
day : 02 } \" ) day_shift ( start , offset ) classmethod \u00b6 \u5bf9\u6307\u5b9a\u65e5\u671f\u8fdb\u884c\u524d\u540e\u79fb\u4f4d\u64cd\u4f5c \u5982\u679c n == 0\uff0c\u5219\u8fd4\u56ded\u5bf9\u5e94\u7684\u4ea4\u6613\u65e5\uff08\u5982\u679c\u662f\u975e\u4ea4\u6613\u65e5\uff0c\u5219\u8fd4\u56de\u521a\u7ed3\u675f\u7684\u4e00\u4e2a\u4ea4\u6613\u65e5\uff09 \u5982\u679c n > 0\uff0c\u5219\u8fd4\u56ded\u5bf9\u5e94\u7684\u4ea4\u6613\u65e5\u540e\u7b2c n \u4e2a\u4ea4\u6613\u65e5 \u5982\u679c n < 0\uff0c\u5219\u8fd4\u56ded\u5bf9\u5e94\u7684\u4ea4\u6613\u65e5\u524d\u7b2c n \u4e2a\u4ea4\u6613\u65e5 Examples: >>> TimeFrame . day_frames = [ 20191212 , 20191213 , 20191216 , 20191217 , 20191218 , 20191219 ] >>> TimeFrame . day_shift ( datetime . date ( 2019 , 12 , 13 ), 0 ) datetime . date ( 2019 , 12 , 13 ) >>> TimeFrame . day_shift ( datetime . date ( 2019 , 12 , 15 ), 0 ) datetime . date ( 2019 , 12 , 13 ) >>> TimeFrame . day_shift ( datetime . date ( 2019 , 12 , 15 ), 1 ) datetime . date ( 2019 , 12 , 16 ) >>> TimeFrame . day_shift ( datetime . date ( 2019 , 12 , 13 ), 1 ) datetime . date ( 2019 , 12 , 16 ) Parameters: Name Type Description Default start datetime.date the origin day required offset int days to shift, can be negative required Returns: Type Description datetime.date \u79fb\u4f4d\u540e\u7684\u65e5\u671f Source code in omicron/models/timeframe.py @classmethod def day_shift ( cls , start : datetime . date , offset : int ) -> datetime . date : \"\"\"\u5bf9\u6307\u5b9a\u65e5\u671f\u8fdb\u884c\u524d\u540e\u79fb\u4f4d\u64cd\u4f5c \u5982\u679c n == 0\uff0c\u5219\u8fd4\u56ded\u5bf9\u5e94\u7684\u4ea4\u6613\u65e5\uff08\u5982\u679c\u662f\u975e\u4ea4\u6613\u65e5\uff0c\u5219\u8fd4\u56de\u521a\u7ed3\u675f\u7684\u4e00\u4e2a\u4ea4\u6613\u65e5\uff09 \u5982\u679c n > 0\uff0c\u5219\u8fd4\u56ded\u5bf9\u5e94\u7684\u4ea4\u6613\u65e5\u540e\u7b2c n \u4e2a\u4ea4\u6613\u65e5 \u5982\u679c n < 0\uff0c\u5219\u8fd4\u56ded\u5bf9\u5e94\u7684\u4ea4\u6613\u65e5\u524d\u7b2c n \u4e2a\u4ea4\u6613\u65e5 Examples: >>> TimeFrame.day_frames = [20191212, 20191213, 20191216, 20191217,20191218, 20191219] >>> TimeFrame.day_shift(datetime.date(2019,12,13), 0) datetime.date(2019, 12, 13) >>> TimeFrame.day_shift(datetime.date(2019, 12, 15), 0) datetime.date(2019, 12, 13) >>> TimeFrame.day_shift(datetime.date(2019, 12, 15), 1) datetime.date(2019, 12, 16) >>> TimeFrame.day_shift(datetime.date(2019, 12, 13), 1) datetime.date(2019, 12, 16) Args: start: the origin day offset: days to shift, can be negative Returns: \u79fb\u4f4d\u540e\u7684\u65e5\u671f \"\"\" # accelerated from 0.12 to 0.07, per 10000 loop, type conversion time included start = cls . date2int ( start ) return cls . int2date ( ext . shift ( cls . day_frames , start , offset )) first_min_frame ( day , frame_type ) classmethod \u00b6 \u83b7\u53d6\u6307\u5b9a\u65e5\u671f\u7c7b\u578b\u4e3a frame_type \u7684 frame \u3002 Examples: >>> TimeFrame . day_frames = np . array ([ 20191227 , 20191230 , 20191231 , 20200102 , 20200103 ]) >>> TimeFrame . first_min_frame ( '2019-12-31' , FrameType . MIN1 ) datetime . datetime ( 2019 , 12 , 31 , 9 , 31 ) Parameters: Name Type Description Default day Union[str, Arrow, Frame] which day? required frame_type FrameType which frame_type? required Returns: Type Description Union[datetime.date, datetime.datetime] day \u5f53\u65e5\u7684\u7b2c\u4e00\u5e27 Source code in omicron/models/timeframe.py @classmethod def first_min_frame ( cls , day : Union [ str , Arrow , Frame ], frame_type : FrameType ) -> Union [ datetime . date , datetime . 
datetime ]: \"\"\"\u83b7\u53d6\u6307\u5b9a\u65e5\u671f\u7c7b\u578b\u4e3a`frame_type`\u7684`frame`\u3002 Examples: >>> TimeFrame.day_frames = np.array([20191227, 20191230, 20191231, 20200102, 20200103]) >>> TimeFrame.first_min_frame('2019-12-31', FrameType.MIN1) datetime.datetime(2019, 12, 31, 9, 31) Args: day: which day? frame_type: which frame_type? Returns: `day`\u5f53\u65e5\u7684\u7b2c\u4e00\u5e27 \"\"\" day = cls . date2int ( arrow . get ( day ) . date ()) if frame_type == FrameType . MIN1 : floor_day = cls . day_frames [ cls . day_frames <= day ][ - 1 ] day = cls . int2date ( floor_day ) return datetime . datetime ( day . year , day . month , day . day , hour = 9 , minute = 31 ) elif frame_type == FrameType . MIN5 : floor_day = cls . day_frames [ cls . day_frames <= day ][ - 1 ] day = cls . int2date ( floor_day ) return datetime . datetime ( day . year , day . month , day . day , hour = 9 , minute = 35 ) elif frame_type == FrameType . MIN15 : floor_day = cls . day_frames [ cls . day_frames <= day ][ - 1 ] day = cls . int2date ( floor_day ) return datetime . datetime ( day . year , day . month , day . day , hour = 9 , minute = 45 ) elif frame_type == FrameType . MIN30 : floor_day = cls . day_frames [ cls . day_frames <= day ][ - 1 ] day = cls . int2date ( floor_day ) return datetime . datetime ( day . year , day . month , day . day , hour = 10 ) elif frame_type == FrameType . MIN60 : floor_day = cls . day_frames [ cls . day_frames <= day ][ - 1 ] day = cls . int2date ( floor_day ) return datetime . datetime ( day . year , day . month , day . day , hour = 10 , minute = 30 ) else : # pragma: no cover raise ValueError ( f \" { frame_type } not supported\" ) floor ( moment , frame_type ) classmethod \u00b6 \u6c42 moment \u5728\u6307\u5b9a\u7684 frame_type \u4e2d\u7684\u4e0b\u754c \u6bd4\u5982\uff0c\u5982\u679c moment \u4e3a10:37\uff0c\u5219\u5f53 frame_type \u4e3a30\u5206\u949f\u65f6\uff0c\u5bf9\u5e94\u7684\u4e0a\u754c\u4e3a10:00 Examples: >>> # \u5982\u679cmoment\u4e3a\u65e5\u671f\uff0c\u5219\u5f53\u6210\u5df2\u6536\u76d8\u5904\u7406 >>> TimeFrame . day_frames = np . array ([ 20050104 , 20050105 , 20050106 , 20050107 , 20050110 , 20050111 ]) >>> TimeFrame . floor ( datetime . date ( 2005 , 1 , 7 ), FrameType . DAY ) datetime . date ( 2005 , 1 , 7 ) >>> # moment\u6307\u5b9a\u7684\u65f6\u95f4\u8fd8\u672a\u6536\u76d8\uff0cfloor\u5230\u4e0a\u4e00\u4e2a\u4ea4\u6613\u65e5 >>> TimeFrame . floor ( datetime . datetime ( 2005 , 1 , 7 , 14 , 59 ), FrameType . DAY ) datetime . date ( 2005 , 1 , 6 ) >>> TimeFrame . floor ( datetime . date ( 2005 , 1 , 13 ), FrameType . WEEK ) datetime . date ( 2005 , 1 , 7 ) >>> TimeFrame . floor ( datetime . date ( 2005 , 2 , 27 ), FrameType . MONTH ) datetime . date ( 2005 , 1 , 31 ) >>> TimeFrame . floor ( datetime . datetime ( 2005 , 1 , 5 , 14 , 59 ), FrameType . MIN30 ) datetime . datetime ( 2005 , 1 , 5 , 14 , 30 ) >>> TimeFrame . floor ( datetime . datetime ( 2005 , 1 , 5 , 14 , 59 ), FrameType . MIN1 ) datetime . datetime ( 2005 , 1 , 5 , 14 , 59 ) >>> TimeFrame . floor ( arrow . get ( '2005-1-5 14:59' ) . naive , FrameType . MIN1 ) datetime . 
datetime ( 2005 , 1 , 5 , 14 , 59 ) Parameters: Name Type Description Default moment Frame required frame_type FrameType required Returns: Type Description Frame moment \u5728\u6307\u5b9a\u7684 frame_type \u4e2d\u7684\u4e0b\u754c Source code in omicron/models/timeframe.py @classmethod def floor ( cls , moment : Frame , frame_type : FrameType ) -> Frame : \"\"\"\u6c42`moment`\u5728\u6307\u5b9a\u7684`frame_type`\u4e2d\u7684\u4e0b\u754c \u6bd4\u5982\uff0c\u5982\u679c`moment`\u4e3a10:37\uff0c\u5219\u5f53`frame_type`\u4e3a30\u5206\u949f\u65f6\uff0c\u5bf9\u5e94\u7684\u4e0a\u754c\u4e3a10:00 Examples: >>> # \u5982\u679cmoment\u4e3a\u65e5\u671f\uff0c\u5219\u5f53\u6210\u5df2\u6536\u76d8\u5904\u7406 >>> TimeFrame.day_frames = np.array([20050104, 20050105, 20050106, 20050107, 20050110, 20050111]) >>> TimeFrame.floor(datetime.date(2005, 1, 7), FrameType.DAY) datetime.date(2005, 1, 7) >>> # moment\u6307\u5b9a\u7684\u65f6\u95f4\u8fd8\u672a\u6536\u76d8\uff0cfloor\u5230\u4e0a\u4e00\u4e2a\u4ea4\u6613\u65e5 >>> TimeFrame.floor(datetime.datetime(2005, 1, 7, 14, 59), FrameType.DAY) datetime.date(2005, 1, 6) >>> TimeFrame.floor(datetime.date(2005, 1, 13), FrameType.WEEK) datetime.date(2005, 1, 7) >>> TimeFrame.floor(datetime.date(2005,2, 27), FrameType.MONTH) datetime.date(2005, 1, 31) >>> TimeFrame.floor(datetime.datetime(2005,1,5,14,59), FrameType.MIN30) datetime.datetime(2005, 1, 5, 14, 30) >>> TimeFrame.floor(datetime.datetime(2005, 1, 5, 14, 59), FrameType.MIN1) datetime.datetime(2005, 1, 5, 14, 59) >>> TimeFrame.floor(arrow.get('2005-1-5 14:59').naive, FrameType.MIN1) datetime.datetime(2005, 1, 5, 14, 59) Args: moment: frame_type: Returns: `moment`\u5728\u6307\u5b9a\u7684`frame_type`\u4e2d\u7684\u4e0b\u754c \"\"\" if frame_type in cls . minute_level_frames : tm , day_offset = cls . minute_frames_floor ( cls . ticks [ frame_type ], moment . hour * 60 + moment . minute ) h , m = tm // 60 , tm % 60 if cls . day_shift ( moment , 0 ) < moment . date () or day_offset == - 1 : h = 15 m = 0 new_day = cls . day_shift ( moment , day_offset ) else : new_day = moment . date () return datetime . datetime ( new_day . year , new_day . month , new_day . day , h , m ) if type ( moment ) == datetime . date : moment = datetime . datetime ( moment . year , moment . month , moment . day , 15 ) # \u5982\u679c\u662f\u4ea4\u6613\u65e5\uff0c\u4f46\u8fd8\u672a\u6536\u76d8 if ( cls . date2int ( moment ) in cls . day_frames and moment . hour * 60 + moment . minute < 900 ): moment = cls . day_shift ( moment , - 1 ) day = cls . date2int ( moment ) if frame_type == FrameType . DAY : arr = cls . day_frames elif frame_type == FrameType . WEEK : arr = cls . week_frames elif frame_type == FrameType . MONTH : arr = cls . month_frames else : # pragma: no cover raise ValueError ( f \"frame type { frame_type } not supported.\" ) floored = ext . floor ( arr , day ) return cls . int2date ( floored ) frame_len ( frame_type ) classmethod \u00b6 \u8fd4\u56de\u4ee5\u5206\u949f\u4e3a\u5355\u4f4d\u7684frame\u957f\u5ea6\u3002 \u5bf9\u65e5\u7ebf\u4ee5\u4e0a\u7ea7\u522b\u6ca1\u6709\u610f\u4e49\uff0c\u4f46\u4f1a\u8fd4\u56de240 Examples: >>> TimeFrame . frame_len ( FrameType . 
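# --- Illustrative aside (not omicron code) ----------------------------------
# frame_len's if/elif chain is equivalent to a small lookup table; 240 is the
# number of trading minutes in one A-share session (09:31-11:30 plus
# 13:01-15:00) and is returned for day-level and coarser frames.
FRAME_LEN_MINUTES = {
    FrameType.MIN1: 1,
    FrameType.MIN5: 5,
    FrameType.MIN15: 15,
    FrameType.MIN30: 30,
    FrameType.MIN60: 60,
}

def frame_len_minutes(frame_type) -> int:
    return FRAME_LEN_MINUTES.get(frame_type, 240)
# --- end of aside ------------------------------------------------------------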
MIN5 ) 5 Parameters: Name Type Description Default frame_type FrameType required Returns: Type Description int \u8fd4\u56de\u4ee5\u5206\u949f\u4e3a\u5355\u4f4d\u7684frame\u957f\u5ea6\u3002 Source code in omicron/models/timeframe.py @classmethod def frame_len ( cls , frame_type : FrameType ) -> int : \"\"\"\u8fd4\u56de\u4ee5\u5206\u949f\u4e3a\u5355\u4f4d\u7684frame\u957f\u5ea6\u3002 \u5bf9\u65e5\u7ebf\u4ee5\u4e0a\u7ea7\u522b\u6ca1\u6709\u610f\u4e49\uff0c\u4f46\u4f1a\u8fd4\u56de240 Examples: >>> TimeFrame.frame_len(FrameType.MIN5) 5 Args: frame_type: Returns: \u8fd4\u56de\u4ee5\u5206\u949f\u4e3a\u5355\u4f4d\u7684frame\u957f\u5ea6\u3002 \"\"\" if frame_type == FrameType . MIN1 : return 1 elif frame_type == FrameType . MIN5 : return 5 elif frame_type == FrameType . MIN15 : return 15 elif frame_type == FrameType . MIN30 : return 30 elif frame_type == FrameType . MIN60 : return 60 else : return 240 get_frame_scope ( frame , ft ) classmethod \u00b6 \u5bf9\u4e8e\u7ed9\u5b9a\u7684\u65f6\u95f4\uff0c\u53d6\u6240\u5728\u5468\u7684\u7b2c\u4e00\u5929\u548c\u6700\u540e\u4e00\u5929\uff0c\u6240\u5728\u6708\u7684\u7b2c\u4e00\u5929\u548c\u6700\u540e\u4e00\u5929 Parameters: Name Type Description Default frame \u6307\u5b9a\u7684\u65e5\u671f\uff0cdate\u5bf9\u8c61 required ft FrameType \u5e27\u7c7b\u578b\uff0c\u652f\u6301WEEK\u548cMONTH required Returns: Type Description Tuple[Frame, Frame] \u5468\u6216\u8005\u6708\u7684\u9996\u672b\u65e5\u671f\uff08date\u5bf9\u8c61\uff09 Source code in omicron/models/timeframe.py @classmethod def get_frame_scope ( cls , frame : Frame , ft : FrameType ) -> Tuple [ Frame , Frame ]: # todo: \u51fd\u6570\u7684\u901a\u7528\u6027\u4e0d\u8db3\uff0c\u4f3c\u4e4e\u5e94\u8be5\u653e\u5728\u5177\u4f53\u7684\u4e1a\u52a1\u7c7b\u4e2d\u3002\u5982\u679c\u662f\u901a\u7528\u578b\u7684\u51fd\u6570\uff0c\u53c2\u6570\u4e0d\u5e94\u8be5\u5c40\u9650\u4e8e\u5468\u548c\u6708\u3002 \"\"\"\u5bf9\u4e8e\u7ed9\u5b9a\u7684\u65f6\u95f4\uff0c\u53d6\u6240\u5728\u5468\u7684\u7b2c\u4e00\u5929\u548c\u6700\u540e\u4e00\u5929\uff0c\u6240\u5728\u6708\u7684\u7b2c\u4e00\u5929\u548c\u6700\u540e\u4e00\u5929 Args: frame : \u6307\u5b9a\u7684\u65e5\u671f\uff0cdate\u5bf9\u8c61 ft: \u5e27\u7c7b\u578b\uff0c\u652f\u6301WEEK\u548cMONTH Returns: Tuple[Frame, Frame]: \u5468\u6216\u8005\u6708\u7684\u9996\u672b\u65e5\u671f\uff08date\u5bf9\u8c61\uff09 \"\"\" if frame is None : raise ValueError ( \"frame cannot be None\" ) if ft not in ( FrameType . WEEK , FrameType . MONTH ): raise ValueError ( f \"FrameType only supports WEEK and MONTH: { ft } \" ) if isinstance ( frame , datetime . datetime ): frame = frame . date () if frame < CALENDAR_START : raise ValueError ( f \"cannot be earlier than { CALENDAR_START } : { frame } \" ) # datetime.date(2021, 10, 8)\uff0c\u8fd9\u662f\u4e2a\u7279\u6b8a\u7684\u65e5\u671f if ft == FrameType . WEEK : if frame < datetime . date ( 2005 , 1 , 10 ): return datetime . date ( 2005 , 1 , 4 ), datetime . date ( 2005 , 1 , 7 ) if not cls . is_trade_day ( frame ): # \u975e\u4ea4\u6613\u65e5\u7684\u60c5\u51b5\uff0c\u76f4\u63a5\u56de\u9000\u4e00\u5929 week_day = cls . day_shift ( frame , 0 ) else : week_day = frame w1 = TimeFrame . floor ( week_day , FrameType . WEEK ) if w1 == week_day : # \u672c\u5468\u7684\u6700\u540e\u4e00\u4e2a\u4ea4\u6613\u65e5 week_end = w1 else : week_end = TimeFrame . week_shift ( week_day , 1 ) w0 = TimeFrame . week_shift ( week_end , - 1 ) week_start = TimeFrame . day_shift ( w0 , 1 ) return week_start , week_end if ft == FrameType . MONTH : if frame <= datetime . 
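# --- Usage sketch (not omicron code; assumes an initialized calendar in which
# the trading week around 2020-01-08 ran Mon 2020-01-06 .. Fri 2020-01-10 and
# the last trading day of January 2020 was 2020-01-23) -------------------------
import datetime

TimeFrame.get_frame_scope(datetime.date(2020, 1, 8), FrameType.WEEK)
# -> (datetime.date(2020, 1, 6), datetime.date(2020, 1, 10))
TimeFrame.get_frame_scope(datetime.date(2020, 1, 8), FrameType.MONTH)
# -> (datetime.date(2020, 1, 2), datetime.date(2020, 1, 23))
# --- end of sketch ------------------------------------------------------------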
date ( 2005 , 1 , 31 ): return datetime . date ( 2005 , 1 , 4 ), datetime . date ( 2005 , 1 , 31 ) month_start = frame . replace ( day = 1 ) if not cls . is_trade_day ( month_start ): # \u975e\u4ea4\u6613\u65e5\u7684\u60c5\u51b5\uff0c\u76f4\u63a5\u52a01 month_start = cls . day_shift ( month_start , 1 ) month_end = TimeFrame . month_shift ( month_start , 1 ) return month_start , month_end get_frames ( start , end , frame_type ) classmethod \u00b6 \u53d6[start, end]\u95f4\u6240\u6709\u7c7b\u578b\u4e3aframe_type\u7684frames \u8c03\u7528\u672c\u51fd\u6570\u524d\uff0c\u8bf7\u5148\u901a\u8fc7 floor \u6216\u8005 ceiling \u5c06\u65f6\u95f4\u5e27\u5bf9\u9f50\u5230 frame_type \u7684\u8fb9\u754c\u503c Examples: >>> start = arrow . get ( '2020-1-13 10:00' ) . naive >>> end = arrow . get ( '2020-1-13 13:30' ) . naive >>> TimeFrame . day_frames = np . array ([ 20200109 , 20200110 , 20200113 , 20200114 , 20200115 , 20200116 ]) >>> TimeFrame . get_frames ( start , end , FrameType . MIN30 ) [ 202001131000 , 202001131030 , 202001131100 , 202001131130 , 202001131330 ] Parameters: Name Type Description Default start Frame required end Frame required frame_type FrameType required Returns: Type Description List[int] frame list Source code in omicron/models/timeframe.py @classmethod def get_frames ( cls , start : Frame , end : Frame , frame_type : FrameType ) -> List [ int ]: \"\"\"\u53d6[start, end]\u95f4\u6240\u6709\u7c7b\u578b\u4e3aframe_type\u7684frames \u8c03\u7528\u672c\u51fd\u6570\u524d\uff0c\u8bf7\u5148\u901a\u8fc7`floor`\u6216\u8005`ceiling`\u5c06\u65f6\u95f4\u5e27\u5bf9\u9f50\u5230`frame_type`\u7684\u8fb9\u754c\u503c Example: >>> start = arrow.get('2020-1-13 10:00').naive >>> end = arrow.get('2020-1-13 13:30').naive >>> TimeFrame.day_frames = np.array([20200109, 20200110, 20200113,20200114, 20200115, 20200116]) >>> TimeFrame.get_frames(start, end, FrameType.MIN30) [202001131000, 202001131030, 202001131100, 202001131130, 202001131330] Args: start: end: frame_type: Returns: frame list \"\"\" n = cls . count_frames ( start , end , frame_type ) return cls . get_frames_by_count ( end , n , frame_type ) get_frames_by_count ( end , n , frame_type ) classmethod \u00b6 \u53d6\u4ee5end\u4e3a\u7ed3\u675f\u70b9,\u5468\u671f\u4e3aframe_type\u7684n\u4e2aframe \u8c03\u7528\u524d\u8bf7\u5c06 end \u5bf9\u9f50\u5230 frame_type \u7684\u8fb9\u754c Examples: >>> end = arrow . get ( '2020-1-6 14:30' ) . naive >>> TimeFrame . day_frames = np . array ([ 20200102 , 20200103 , 20200106 , 20200107 , 20200108 , 20200109 ]) >>> TimeFrame . get_frames_by_count ( end , 2 , FrameType . MIN30 ) [ 202001061400 , 202001061430 ] Parameters: Name Type Description Default end Arrow required n int required frame_type FrameType required Returns: Type Description List[int] frame list Source code in omicron/models/timeframe.py @classmethod def get_frames_by_count ( cls , end : Arrow , n : int , frame_type : FrameType ) -> List [ int ]: \"\"\"\u53d6\u4ee5end\u4e3a\u7ed3\u675f\u70b9,\u5468\u671f\u4e3aframe_type\u7684n\u4e2aframe \u8c03\u7528\u524d\u8bf7\u5c06`end`\u5bf9\u9f50\u5230`frame_type`\u7684\u8fb9\u754c Examples: >>> end = arrow.get('2020-1-6 14:30').naive >>> TimeFrame.day_frames = np.array([20200102, 20200103,20200106, 20200107, 20200108, 20200109]) >>> TimeFrame.get_frames_by_count(end, 2, FrameType.MIN30) [202001061400, 202001061430] Args: end: n: frame_type: Returns: frame list \"\"\" if frame_type == FrameType . DAY : end = cls . date2int ( end ) pos = np . searchsorted ( cls . day_frames , end , side = \"right\" ) return cls . 
day_frames [ max ( 0 , pos - n ) : pos ] . tolist () elif frame_type == FrameType . WEEK : end = cls . date2int ( end ) pos = np . searchsorted ( cls . week_frames , end , side = \"right\" ) return cls . week_frames [ max ( 0 , pos - n ) : pos ] . tolist () elif frame_type == FrameType . MONTH : end = cls . date2int ( end ) pos = np . searchsorted ( cls . month_frames , end , side = \"right\" ) return cls . month_frames [ max ( 0 , pos - n ) : pos ] . tolist () elif frame_type in { FrameType . MIN1 , FrameType . MIN5 , FrameType . MIN15 , FrameType . MIN30 , FrameType . MIN60 , }: n_days = n // len ( cls . ticks [ frame_type ]) + 2 ticks = cls . ticks [ frame_type ] * n_days days = cls . get_frames_by_count ( end , n_days , FrameType . DAY ) days = np . repeat ( days , len ( cls . ticks [ frame_type ])) ticks = [ day . item () * 10000 + int ( tm / 60 ) * 100 + tm % 60 for day , tm in zip ( days , ticks ) ] # list index is much faster than ext.index_sorted when the arr is small pos = ticks . index ( cls . time2int ( end )) + 1 return ticks [ max ( 0 , pos - n ) : pos ] else : # pragma: no cover raise ValueError ( f \" { frame_type } not support yet\" ) get_previous_trade_day ( now ) classmethod \u00b6 \u83b7\u53d6\u4e0a\u4e00\u4e2a\u4ea4\u6613\u65e5 \u5982\u679c\u5f53\u5929\u662f\u5468\u516d\u6216\u8005\u5468\u65e5\uff0c\u8fd4\u56de\u5468\u4e94\uff08\u4ea4\u6613\u65e5\uff09\uff0c\u5982\u679c\u5f53\u5929\u662f\u5468\u4e00\uff0c\u8fd4\u56de\u5468\u4e94\uff0c\u5982\u679c\u5f53\u5929\u662f\u5468\u4e94\uff0c\u8fd4\u56de\u5468\u56db Parameters: Name Type Description Default now \u6307\u5b9a\u7684\u65e5\u671f\uff0cdate\u5bf9\u8c61 required Returns: Type Description datetime.date \u4e0a\u4e00\u4e2a\u4ea4\u6613\u65e5 Source code in omicron/models/timeframe.py @classmethod def get_previous_trade_day ( cls , now : datetime . date ): \"\"\"\u83b7\u53d6\u4e0a\u4e00\u4e2a\u4ea4\u6613\u65e5 \u5982\u679c\u5f53\u5929\u662f\u5468\u516d\u6216\u8005\u5468\u65e5\uff0c\u8fd4\u56de\u5468\u4e94\uff08\u4ea4\u6613\u65e5\uff09\uff0c\u5982\u679c\u5f53\u5929\u662f\u5468\u4e00\uff0c\u8fd4\u56de\u5468\u4e94\uff0c\u5982\u679c\u5f53\u5929\u662f\u5468\u4e94\uff0c\u8fd4\u56de\u5468\u56db Args: now : \u6307\u5b9a\u7684\u65e5\u671f\uff0cdate\u5bf9\u8c61 Returns: datetime.date: \u4e0a\u4e00\u4e2a\u4ea4\u6613\u65e5 \"\"\" if now == datetime . date ( 2005 , 1 , 4 ): return now if TimeFrame . is_trade_day ( now ): pre_trade_day = TimeFrame . day_shift ( now , - 1 ) else : pre_trade_day = TimeFrame . day_shift ( now , 0 ) return pre_trade_day get_ticks ( frame_type ) classmethod \u00b6 \u53d6\u6708\u7ebf\u3001\u5468\u7ebf\u3001\u65e5\u7ebf\u53ca\u5404\u5206\u949f\u7ebf\u5bf9\u5e94\u7684frame \u5bf9\u5206\u949f\u7ebf\uff0c\u8fd4\u56de\u503c\u4ec5\u5305\u542b\u65f6\u95f4\uff0c\u4e0d\u5305\u542b\u65e5\u671f\uff08\u5747\u4e3a\u6574\u6570\u8868\u793a\uff09 Examples: >>> TimeFrame . month_frames = np . array ([ 20050131 , 20050228 , 20050331 ]) >>> TimeFrame . get_ticks ( FrameType . MONTH )[: 3 ] array ([ 20050131 , 20050228 , 20050331 ]) Parameters: Name Type Description Default frame_type [description] required Exceptions: Type Description ValueError [description] Returns: Type Description Union[List, np.array] \u6708\u7ebf\u3001\u5468\u7ebf\u3001\u65e5\u7ebf\u53ca\u5404\u5206\u949f\u7ebf\u5bf9\u5e94\u7684frame Source code in omicron/models/timeframe.py @classmethod def get_ticks ( cls , frame_type : FrameType ) -> Union [ List , np . 
array ]: \"\"\"\u53d6\u6708\u7ebf\u3001\u5468\u7ebf\u3001\u65e5\u7ebf\u53ca\u5404\u5206\u949f\u7ebf\u5bf9\u5e94\u7684frame \u5bf9\u5206\u949f\u7ebf\uff0c\u8fd4\u56de\u503c\u4ec5\u5305\u542b\u65f6\u95f4\uff0c\u4e0d\u5305\u542b\u65e5\u671f\uff08\u5747\u4e3a\u6574\u6570\u8868\u793a\uff09 Examples: >>> TimeFrame.month_frames = np.array([20050131, 20050228, 20050331]) >>> TimeFrame.get_ticks(FrameType.MONTH)[:3] array([20050131, 20050228, 20050331]) Args: frame_type : [description] Raises: ValueError: [description] Returns: \u6708\u7ebf\u3001\u5468\u7ebf\u3001\u65e5\u7ebf\u53ca\u5404\u5206\u949f\u7ebf\u5bf9\u5e94\u7684frame \"\"\" if frame_type in cls . minute_level_frames : return cls . ticks [ frame_type ] if frame_type == FrameType . DAY : return cls . day_frames elif frame_type == FrameType . WEEK : return cls . week_frames elif frame_type == FrameType . MONTH : return cls . month_frames else : # pragma: no cover raise ValueError ( f \" { frame_type } not supported!\" ) init () async classmethod \u00b6 \u521d\u59cb\u5316\u65e5\u5386 Source code in omicron/models/timeframe.py @classmethod async def init ( cls ): \"\"\"\u521d\u59cb\u5316\u65e5\u5386\"\"\" await cls . _load_calendar () int2date ( d ) classmethod \u00b6 \u5c06\u6570\u5b57\u8868\u793a\u7684\u65e5\u671f\u8f6c\u6362\u6210\u4e3a\u65e5\u671f\u683c\u5f0f Examples: >>> TimeFrame . int2date ( 20200501 ) datetime . date ( 2020 , 5 , 1 ) Parameters: Name Type Description Default d Union[int, str] YYYYMMDD\u8868\u793a\u7684\u65e5\u671f required Returns: Type Description datetime.date \u8f6c\u6362\u540e\u7684\u65e5\u671f Source code in omicron/models/timeframe.py @classmethod def int2date ( cls , d : Union [ int , str ]) -> datetime . date : \"\"\"\u5c06\u6570\u5b57\u8868\u793a\u7684\u65e5\u671f\u8f6c\u6362\u6210\u4e3a\u65e5\u671f\u683c\u5f0f Examples: >>> TimeFrame.int2date(20200501) datetime.date(2020, 5, 1) Args: d: YYYYMMDD\u8868\u793a\u7684\u65e5\u671f Returns: \u8f6c\u6362\u540e\u7684\u65e5\u671f \"\"\" s = str ( d ) # it's 8 times faster than arrow.get return datetime . date ( int ( s [: 4 ]), int ( s [ 4 : 6 ]), int ( s [ 6 :])) int2time ( tm ) classmethod \u00b6 \u5c06\u6574\u6570\u8868\u793a\u7684\u65f6\u95f4\u8f6c\u6362\u4e3a datetime \u7c7b\u578b\u8868\u793a Examples: >>> TimeFrame . int2time ( 202005011500 ) datetime . datetime ( 2020 , 5 , 1 , 15 , 0 ) Parameters: Name Type Description Default tm int time in YYYYMMDDHHmm format required Returns: Type Description datetime.datetime \u8f6c\u6362\u540e\u7684\u65f6\u95f4 Source code in omicron/models/timeframe.py @classmethod def int2time ( cls , tm : int ) -> datetime . datetime : \"\"\"\u5c06\u6574\u6570\u8868\u793a\u7684\u65f6\u95f4\u8f6c\u6362\u4e3a`datetime`\u7c7b\u578b\u8868\u793a examples: >>> TimeFrame.int2time(202005011500) datetime.datetime(2020, 5, 1, 15, 0) Args: tm: time in YYYYMMDDHHmm format Returns: \u8f6c\u6362\u540e\u7684\u65f6\u95f4 \"\"\" s = str ( tm ) # its 8 times faster than arrow.get() return datetime . 
datetime ( int ( s [: 4 ]), int ( s [ 4 : 6 ]), int ( s [ 6 : 8 ]), int ( s [ 8 : 10 ]), int ( s [ 10 : 12 ]) ) is_bar_closed ( frame , ft ) classmethod \u00b6 \u5224\u65ad frame \u6240\u4ee3\u8868\u7684bar\u662f\u5426\u5df2\u7ecf\u6536\u76d8\uff08\u7ed3\u675f\uff09 \u5982\u679c\u662f\u65e5\u7ebf\uff0cframe\u4e0d\u4e3a\u5f53\u5929\uff0c\u5219\u8ba4\u4e3a\u5df2\u6536\u76d8\uff1b\u6216\u8005\u5f53\u524d\u65f6\u95f4\u5728\u6536\u76d8\u65f6\u95f4\u4e4b\u540e\uff0c\u4e5f\u8ba4\u4e3a\u5df2\u6536\u76d8\u3002 \u5982\u679c\u662f\u5176\u5b83\u5468\u671f\uff0c\u5219\u53ea\u6709\u5f53frame\u6b63\u597d\u5728\u8fb9\u754c\u4e0a\uff0c\u624d\u8ba4\u4e3a\u662f\u5df2\u6536\u76d8\u3002\u8fd9\u91cc\u6709\u4e00\u4e2a\u5047\u8bbe\uff1a\u6211\u4eec\u4e0d\u4f1a\u5728\u5176\u5b83\u5468\u671f\u4e0a\uff0c\u5224\u65ad\u672a\u6765\u7684\u67d0\u4e2aframe\u662f\u5426\u5df2\u7ecf\u6536\u76d8\u3002 Parameters: Name Type Description Default frame bar\u6240\u5904\u7684\u65f6\u95f4\uff0c\u5fc5\u987b\u5c0f\u4e8e\u5f53\u524d\u65f6\u95f4 required ft FrameType bar\u6240\u4ee3\u8868\u7684\u5e27\u7c7b\u578b required Returns: Type Description bool \u662f\u5426\u5df2\u7ecf\u6536\u76d8 Source code in omicron/models/timeframe.py @classmethod def is_bar_closed ( cls , frame : Frame , ft : FrameType ) -> bool : \"\"\"\u5224\u65ad`frame`\u6240\u4ee3\u8868\u7684bar\u662f\u5426\u5df2\u7ecf\u6536\u76d8\uff08\u7ed3\u675f\uff09 \u5982\u679c\u662f\u65e5\u7ebf\uff0cframe\u4e0d\u4e3a\u5f53\u5929\uff0c\u5219\u8ba4\u4e3a\u5df2\u6536\u76d8\uff1b\u6216\u8005\u5f53\u524d\u65f6\u95f4\u5728\u6536\u76d8\u65f6\u95f4\u4e4b\u540e\uff0c\u4e5f\u8ba4\u4e3a\u5df2\u6536\u76d8\u3002 \u5982\u679c\u662f\u5176\u5b83\u5468\u671f\uff0c\u5219\u53ea\u6709\u5f53frame\u6b63\u597d\u5728\u8fb9\u754c\u4e0a\uff0c\u624d\u8ba4\u4e3a\u662f\u5df2\u6536\u76d8\u3002\u8fd9\u91cc\u6709\u4e00\u4e2a\u5047\u8bbe\uff1a\u6211\u4eec\u4e0d\u4f1a\u5728\u5176\u5b83\u5468\u671f\u4e0a\uff0c\u5224\u65ad\u672a\u6765\u7684\u67d0\u4e2aframe\u662f\u5426\u5df2\u7ecf\u6536\u76d8\u3002 Args: frame : bar\u6240\u5904\u7684\u65f6\u95f4\uff0c\u5fc5\u987b\u5c0f\u4e8e\u5f53\u524d\u65f6\u95f4 ft: bar\u6240\u4ee3\u8868\u7684\u5e27\u7c7b\u578b Returns: bool: \u662f\u5426\u5df2\u7ecf\u6536\u76d8 \"\"\" floor = cls . floor ( frame , ft ) now = arrow . now () if ft == FrameType . DAY : return floor < now . date () or now . hour >= 15 else : return floor == frame is_closing_call_auction_time ( tm = None ) classmethod \u00b6 \u5224\u65ad tm \u6307\u5b9a\u7684\u65f6\u95f4\u662f\u5426\u4e3a\u6536\u76d8\u96c6\u5408\u7ade\u4ef7\u65f6\u95f4 Fixme \u6b64\u5904\u5b9e\u73b0\u6709\u8bef\uff0c\u6536\u76d8\u96c6\u5408\u7ade\u4ef7\u65f6\u95f4\u5e94\u8be5\u8fd8\u5305\u542b\u4e0a\u5348\u6536\u76d8\u65f6\u95f4 Parameters: Name Type Description Default tm [description]. Defaults to None. None Returns: Type Description bool bool Source code in omicron/models/timeframe.py @classmethod def is_closing_call_auction_time ( cls , tm : Union [ datetime . datetime , Arrow ] = None ) -> bool : \"\"\"\u5224\u65ad`tm`\u6307\u5b9a\u7684\u65f6\u95f4\u662f\u5426\u4e3a\u6536\u76d8\u96c6\u5408\u7ade\u4ef7\u65f6\u95f4 Fixme: \u6b64\u5904\u5b9e\u73b0\u6709\u8bef\uff0c\u6536\u76d8\u96c6\u5408\u7ade\u4ef7\u65f6\u95f4\u5e94\u8be5\u8fd8\u5305\u542b\u4e0a\u5348\u6536\u76d8\u65f6\u95f4 Args: tm : [description]. Defaults to None. Returns: bool \"\"\" tm = tm or cls . now () if not cls . is_trade_day ( tm ): return False minutes = tm . hour * 60 + tm . 
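# --- Illustrative aside (not omicron code) ----------------------------------
# With the checks shown here, the opening call-auction window is
# 09:15 < t <= 09:25 and the closing call-auction window is 14:57 <= t < 15:00,
# both only on trading days; the Fixme in the docstring above records a known
# limitation of the closing-auction check.
def in_opening_auction(minutes_of_day: int) -> bool:
    return 9 * 60 + 15 < minutes_of_day <= 9 * 60 + 25

def in_closing_auction(minutes_of_day: int) -> bool:
    return 15 * 60 - 3 <= minutes_of_day < 15 * 60

assert in_opening_auction(9 * 60 + 20)      # 09:20
assert not in_opening_auction(9 * 60 + 15)  # 09:15 itself is excluded
assert in_closing_auction(14 * 60 + 58)     # 14:58
# --- end of aside ------------------------------------------------------------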
minute return 15 * 60 - 3 <= minutes < 15 * 60 is_open_time ( tm = None ) classmethod \u00b6 \u5224\u65ad tm \u6307\u5b9a\u7684\u65f6\u95f4\u662f\u5426\u5904\u5728\u4ea4\u6613\u65f6\u95f4\u6bb5\u3002 \u4ea4\u6613\u65f6\u95f4\u6bb5\u662f\u6307\u96c6\u5408\u7ade\u4ef7\u65f6\u95f4\u6bb5\u4e4b\u5916\u7684\u5f00\u76d8\u65f6\u95f4 Examples: >>> TimeFrame . day_frames = np . array ([ 20200102 , 20200103 , 20200106 , 20200107 , 20200108 ]) >>> TimeFrame . is_open_time ( arrow . get ( '2020-1-1 14:59' ) . naive ) False >>> TimeFrame . is_open_time ( arrow . get ( '2020-1-3 14:59' ) . naive ) True Parameters: Name Type Description Default tm [description]. Defaults to None. None Returns: Type Description bool bool Source code in omicron/models/timeframe.py @classmethod def is_open_time ( cls , tm : Union [ datetime . datetime , Arrow ] = None ) -> bool : \"\"\"\u5224\u65ad`tm`\u6307\u5b9a\u7684\u65f6\u95f4\u662f\u5426\u5904\u5728\u4ea4\u6613\u65f6\u95f4\u6bb5\u3002 \u4ea4\u6613\u65f6\u95f4\u6bb5\u662f\u6307\u96c6\u5408\u7ade\u4ef7\u65f6\u95f4\u6bb5\u4e4b\u5916\u7684\u5f00\u76d8\u65f6\u95f4 Examples: >>> TimeFrame.day_frames = np.array([20200102, 20200103, 20200106, 20200107, 20200108]) >>> TimeFrame.is_open_time(arrow.get('2020-1-1 14:59').naive) False >>> TimeFrame.is_open_time(arrow.get('2020-1-3 14:59').naive) True Args: tm : [description]. Defaults to None. Returns: bool \"\"\" tm = tm or arrow . now () if not cls . is_trade_day ( tm ): return False tick = tm . hour * 60 + tm . minute return tick in cls . ticks [ FrameType . MIN1 ] is_opening_call_auction_time ( tm = None ) classmethod \u00b6 \u5224\u65ad tm \u6307\u5b9a\u7684\u65f6\u95f4\u662f\u5426\u4e3a\u5f00\u76d8\u96c6\u5408\u7ade\u4ef7\u65f6\u95f4 Parameters: Name Type Description Default tm [description]. Defaults to None. None Returns: Type Description bool bool Source code in omicron/models/timeframe.py @classmethod def is_opening_call_auction_time ( cls , tm : Union [ Arrow , datetime . datetime ] = None ) -> bool : \"\"\"\u5224\u65ad`tm`\u6307\u5b9a\u7684\u65f6\u95f4\u662f\u5426\u4e3a\u5f00\u76d8\u96c6\u5408\u7ade\u4ef7\u65f6\u95f4 Args: tm : [description]. Defaults to None. Returns: bool \"\"\" if tm is None : tm = cls . now () if not cls . is_trade_day ( tm ): return False minutes = tm . hour * 60 + tm . minute return 9 * 60 + 15 < minutes <= 9 * 60 + 25 is_trade_day ( dt ) classmethod \u00b6 \u5224\u65ad dt \u662f\u5426\u4e3a\u4ea4\u6613\u65e5 Examples: >>> TimeFrame . is_trade_day ( arrow . get ( '2020-1-1' )) False Parameters: Name Type Description Default dt required Returns: Type Description bool bool Source code in omicron/models/timeframe.py @classmethod def is_trade_day ( cls , dt : Union [ datetime . date , datetime . datetime , Arrow ]) -> bool : \"\"\"\u5224\u65ad`dt`\u662f\u5426\u4e3a\u4ea4\u6613\u65e5 Examples: >>> TimeFrame.is_trade_day(arrow.get('2020-1-1')) False Args: dt : Returns: bool \"\"\" return cls . date2int ( dt ) in cls . day_frames last_min_frame ( day , frame_type ) classmethod \u00b6 \u83b7\u53d6 day \u65e5\u5468\u671f\u4e3a frame_type \u7684\u7ed3\u675fframe\u3002 Examples: >>> TimeFrame . last_min_frame ( arrow . get ( '2020-1-5' ) . date (), FrameType . MIN30 ) datetime . 
datetime ( 2020 , 1 , 3 , 15 , 0 ) Parameters: Name Type Description Default day Union[str, Arrow, datetime.date] required frame_type FrameType required Returns: Type Description Union[datetime.date, datetime.datetime] day \u65e5\u5468\u671f\u4e3a frame_type \u7684\u7ed3\u675fframe Source code in omicron/models/timeframe.py @classmethod def last_min_frame ( cls , day : Union [ str , Arrow , datetime . date ], frame_type : FrameType ) -> Union [ datetime . date , datetime . datetime ]: \"\"\"\u83b7\u53d6`day`\u65e5\u5468\u671f\u4e3a`frame_type`\u7684\u7ed3\u675fframe\u3002 Example: >>> TimeFrame.last_min_frame(arrow.get('2020-1-5').date(), FrameType.MIN30) datetime.datetime(2020, 1, 3, 15, 0) Args: day: frame_type: Returns: `day`\u65e5\u5468\u671f\u4e3a`frame_type`\u7684\u7ed3\u675fframe \"\"\" if isinstance ( day , str ): day = cls . date2int ( arrow . get ( day ) . date ()) elif isinstance ( day , arrow . Arrow ) or isinstance ( day , datetime . datetime ): day = cls . date2int ( day . date ()) elif isinstance ( day , datetime . date ): day = cls . date2int ( day ) else : raise TypeError ( f \" { type ( day ) } is not supported.\" ) if frame_type in cls . minute_level_frames : last_close_day = cls . day_frames [ cls . day_frames <= day ][ - 1 ] day = cls . int2date ( last_close_day ) return datetime . datetime ( day . year , day . month , day . day , hour = 15 , minute = 0 ) else : # pragma: no cover raise ValueError ( f \" { frame_type } not supported\" ) minute_frames_floor ( ticks , moment ) classmethod \u00b6 \u5bf9\u4e8e\u5206\u949f\u7ea7\u7684frame,\u8fd4\u56de\u5b83\u4eec\u4e0eframe\u523b\u5ea6\u5411\u4e0b\u5bf9\u9f50\u540e\u7684frame\u53ca\u65e5\u671f\u8fdb\u4f4d\u3002\u5982\u679c\u9700\u8981\u5bf9\u9f50\u5230\u4e0a\u4e00\u4e2a\u4ea4\u6613 \u65e5\uff0c\u5219\u8fdb\u4f4d\u4e3a-1\uff0c\u5426\u5219\u4e3a0. Examples: >>> ticks = [ 600 , 630 , 660 , 690 , 810 , 840 , 870 , 900 ] >>> TimeFrame . minute_frames_floor ( ticks , 545 ) ( 900 , - 1 ) >>> TimeFrame . minute_frames_floor ( ticks , 600 ) ( 600 , 0 ) >>> TimeFrame . minute_frames_floor ( ticks , 605 ) ( 600 , 0 ) >>> TimeFrame . minute_frames_floor ( ticks , 899 ) ( 870 , 0 ) >>> TimeFrame . minute_frames_floor ( ticks , 900 ) ( 900 , 0 ) >>> TimeFrame . minute_frames_floor ( ticks , 905 ) ( 900 , 0 ) Parameters: Name Type Description Default ticks np.array or list frames\u523b\u5ea6 required moment int \u6574\u6570\u8868\u793a\u7684\u5206\u949f\u6570\uff0c\u6bd4\u5982900\u8868\u793a15\uff1a00 required Returns: Type Description Tuple[int, int] tuple, the first is the new moment, the second is carry-on Source code in omicron/models/timeframe.py @classmethod def minute_frames_floor ( cls , ticks , moment ) -> Tuple [ int , int ]: \"\"\" \u5bf9\u4e8e\u5206\u949f\u7ea7\u7684frame,\u8fd4\u56de\u5b83\u4eec\u4e0eframe\u523b\u5ea6\u5411\u4e0b\u5bf9\u9f50\u540e\u7684frame\u53ca\u65e5\u671f\u8fdb\u4f4d\u3002\u5982\u679c\u9700\u8981\u5bf9\u9f50\u5230\u4e0a\u4e00\u4e2a\u4ea4\u6613 \u65e5\uff0c\u5219\u8fdb\u4f4d\u4e3a-1\uff0c\u5426\u5219\u4e3a0. 
Examples: >>> ticks = [600, 630, 660, 690, 810, 840, 870, 900] >>> TimeFrame.minute_frames_floor(ticks, 545) (900, -1) >>> TimeFrame.minute_frames_floor(ticks, 600) (600, 0) >>> TimeFrame.minute_frames_floor(ticks, 605) (600, 0) >>> TimeFrame.minute_frames_floor(ticks, 899) (870, 0) >>> TimeFrame.minute_frames_floor(ticks, 900) (900, 0) >>> TimeFrame.minute_frames_floor(ticks, 905) (900, 0) Args: ticks (np.array or list): frames\u523b\u5ea6 moment (int): \u6574\u6570\u8868\u793a\u7684\u5206\u949f\u6570\uff0c\u6bd4\u5982900\u8868\u793a15\uff1a00 Returns: tuple, the first is the new moment, the second is carry-on \"\"\" if moment < ticks [ 0 ]: return ticks [ - 1 ], - 1 # \u2019right' \u76f8\u5f53\u4e8e ticks <= m index = np . searchsorted ( ticks , moment , side = \"right\" ) return ticks [ index - 1 ], 0 month_shift ( start , offset ) classmethod \u00b6 \u6c42 start \u6240\u5728\u7684\u6708\u79fb\u4f4d\u540e\u7684frame \u672c\u51fd\u6570\u9996\u5148\u5c06 start \u5bf9\u9f50\uff0c\u7136\u540e\u8fdb\u884c\u79fb\u4f4d\u3002 Examples: >>> TimeFrame . month_frames = np . array ([ 20150130 , 20150227 , 20150331 , 20150430 ]) >>> TimeFrame . month_shift ( arrow . get ( '2015-2-26' ) . date (), 0 ) datetime . date ( 2015 , 1 , 30 ) >>> TimeFrame . month_shift ( arrow . get ( '2015-2-27' ) . date (), 0 ) datetime . date ( 2015 , 2 , 27 ) >>> TimeFrame . month_shift ( arrow . get ( '2015-3-1' ) . date (), 0 ) datetime . date ( 2015 , 2 , 27 ) >>> TimeFrame . month_shift ( arrow . get ( '2015-3-1' ) . date (), 1 ) datetime . date ( 2015 , 3 , 31 ) Returns: Type Description datetime.date \u79fb\u4f4d\u540e\u7684\u65e5\u671f Source code in omicron/models/timeframe.py @classmethod def month_shift ( cls , start : datetime . date , offset : int ) -> datetime . date : \"\"\"\u6c42`start`\u6240\u5728\u7684\u6708\u79fb\u4f4d\u540e\u7684frame \u672c\u51fd\u6570\u9996\u5148\u5c06`start`\u5bf9\u9f50\uff0c\u7136\u540e\u8fdb\u884c\u79fb\u4f4d\u3002 Examples: >>> TimeFrame.month_frames = np.array([20150130, 20150227, 20150331, 20150430]) >>> TimeFrame.month_shift(arrow.get('2015-2-26').date(), 0) datetime.date(2015, 1, 30) >>> TimeFrame.month_shift(arrow.get('2015-2-27').date(), 0) datetime.date(2015, 2, 27) >>> TimeFrame.month_shift(arrow.get('2015-3-1').date(), 0) datetime.date(2015, 2, 27) >>> TimeFrame.month_shift(arrow.get('2015-3-1').date(), 1) datetime.date(2015, 3, 31) Returns: \u79fb\u4f4d\u540e\u7684\u65e5\u671f \"\"\" start = cls . date2int ( start ) return cls . int2date ( ext . shift ( cls . month_frames , start , offset )) replace_date ( dtm , dt ) classmethod \u00b6 \u5c06 dtm \u53d8\u91cf\u7684\u65e5\u671f\u66f4\u6362\u4e3a dt \u6307\u5b9a\u7684\u65e5\u671f Examples: >>> TimeFrame . replace_date ( arrow . get ( '2020-1-1 13:49' ) . datetime , datetime . date ( 2019 , 1 , 1 )) datetime . datetime ( 2019 , 1 , 1 , 13 , 49 ) Parameters: Name Type Description Default dtm datetime.datetime [description] required dt datetime.date [description] required Returns: Type Description datetime.datetime \u53d8\u6362\u540e\u7684\u65f6\u95f4 Source code in omicron/models/timeframe.py @classmethod def replace_date ( cls , dtm : datetime . datetime , dt : datetime . date ) -> datetime . 
datetime : \"\"\"\u5c06`dtm`\u53d8\u91cf\u7684\u65e5\u671f\u66f4\u6362\u4e3a`dt`\u6307\u5b9a\u7684\u65e5\u671f Example: >>> TimeFrame.replace_date(arrow.get('2020-1-1 13:49').datetime, datetime.date(2019, 1,1)) datetime.datetime(2019, 1, 1, 13, 49) Args: dtm (datetime.datetime): [description] dt (datetime.date): [description] Returns: \u53d8\u6362\u540e\u7684\u65f6\u95f4 \"\"\" return datetime . datetime ( dt . year , dt . month , dt . day , dtm . hour , dtm . minute , dtm . second , dtm . microsecond ) resample_frames ( trade_days , frame_type ) classmethod \u00b6 \u5c06\u4ece\u884c\u60c5\u670d\u52a1\u5668\u83b7\u53d6\u7684\u4ea4\u6613\u65e5\u5386\u91cd\u91c7\u6837\uff0c\u751f\u6210\u5468\u5e27\u548c\u6708\u7ebf\u5e27 Parameters: Name Type Description Default trade_days Iterable [description] required frame_type FrameType [description] required Returns: Type Description List[int] \u91cd\u91c7\u6837\u540e\u7684\u65e5\u671f\u5217\u8868\uff0c\u65e5\u671f\u7528\u6574\u6570\u8868\u793a Source code in omicron/models/timeframe.py @classmethod def resample_frames ( cls , trade_days : Iterable [ datetime . date ], frame_type : FrameType ) -> List [ int ]: \"\"\"\u5c06\u4ece\u884c\u60c5\u670d\u52a1\u5668\u83b7\u53d6\u7684\u4ea4\u6613\u65e5\u5386\u91cd\u91c7\u6837\uff0c\u751f\u6210\u5468\u5e27\u548c\u6708\u7ebf\u5e27 Args: trade_days (Iterable): [description] frame_type (FrameType): [description] Returns: List[int]: \u91cd\u91c7\u6837\u540e\u7684\u65e5\u671f\u5217\u8868\uff0c\u65e5\u671f\u7528\u6574\u6570\u8868\u793a \"\"\" if frame_type == FrameType . WEEK : weeks = [] last = trade_days [ 0 ] for cur in trade_days : if cur . weekday () < last . weekday () or ( cur - last ) . days >= 7 : weeks . append ( last ) last = cur if weeks [ - 1 ] < last : weeks . append ( last ) return weeks elif frame_type == FrameType . MONTH : months = [] last = trade_days [ 0 ] for cur in trade_days : if cur . day < last . day : months . append ( last ) last = cur months . append ( last ) return months elif frame_type == FrameType . QUARTER : quarters = [] last = trade_days [ 0 ] for cur in trade_days : if last . month % 3 == 0 : if cur . month > last . month or cur . year > last . year : quarters . append ( last ) last = cur quarters . append ( last ) return quarters elif frame_type == FrameType . YEAR : years = [] last = trade_days [ 0 ] for cur in trade_days : if cur . year > last . year : years . append ( last ) last = cur years . 
append ( last ) return years else : # pragma: no cover raise ValueError ( f \"Unsupported FrameType: { frame_type } \" ) service_degrade () classmethod \u00b6 \u5f53cache\u4e2d\u4e0d\u5b58\u5728\u65e5\u5386\u65f6\uff0c\u542f\u7528\u968fomicron\u7248\u672c\u4e00\u8d77\u53d1\u884c\u65f6\u81ea\u5e26\u7684\u65e5\u5386\u3002 \u6ce8\u610f\uff1a\u968fomicron\u7248\u672c\u4e00\u8d77\u53d1\u884c\u65f6\u81ea\u5e26\u7684\u65e5\u5386\u5f88\u53ef\u80fd\u4e0d\u662f\u6700\u65b0\u7684\uff0c\u5e76\u4e14\u53ef\u80fd\u5305\u542b\u9519\u8bef\u3002\u6bd4\u5982\uff0c\u5b58\u5728\u8fd9\u6837\u7684\u60c5\u51b5\uff0c\u5728\u672c\u7248\u672c\u7684omicron\u53d1\u884c\u65f6\uff0c\u65e5\u5386\u66f4\u65b0\u5230\u4e862021\u5e7412\u670831\u65e5\uff0c\u5728\u8fd9\u4e4b\u524d\u7684\u65e5\u5386\u90fd\u662f\u51c6\u786e\u7684\uff0c\u4f46\u5728\u6b64\u4e4b\u540e\u7684\u65e5\u5386\uff0c\u5219\u6709\u53ef\u80fd\u51fa\u73b0\u9519\u8bef\u3002\u56e0\u6b64\uff0c\u53ea\u5e94\u8be5\u5728\u7279\u6b8a\u7684\u60c5\u51b5\u4e0b\uff08\u6bd4\u5982\u6d4b\u8bd5\uff09\u8c03\u7528\u6b64\u65b9\u6cd5\uff0c\u4ee5\u83b7\u5f97\u4e00\u4e2a\u964d\u7ea7\u7684\u670d\u52a1\u3002 Source code in omicron/models/timeframe.py @classmethod def service_degrade ( cls ): \"\"\"\u5f53cache\u4e2d\u4e0d\u5b58\u5728\u65e5\u5386\u65f6\uff0c\u542f\u7528\u968fomicron\u7248\u672c\u4e00\u8d77\u53d1\u884c\u65f6\u81ea\u5e26\u7684\u65e5\u5386\u3002 \u6ce8\u610f\uff1a\u968fomicron\u7248\u672c\u4e00\u8d77\u53d1\u884c\u65f6\u81ea\u5e26\u7684\u65e5\u5386\u5f88\u53ef\u80fd\u4e0d\u662f\u6700\u65b0\u7684\uff0c\u5e76\u4e14\u53ef\u80fd\u5305\u542b\u9519\u8bef\u3002\u6bd4\u5982\uff0c\u5b58\u5728\u8fd9\u6837\u7684\u60c5\u51b5\uff0c\u5728\u672c\u7248\u672c\u7684omicron\u53d1\u884c\u65f6\uff0c\u65e5\u5386\u66f4\u65b0\u5230\u4e862021\u5e7412\u670831\u65e5\uff0c\u5728\u8fd9\u4e4b\u524d\u7684\u65e5\u5386\u90fd\u662f\u51c6\u786e\u7684\uff0c\u4f46\u5728\u6b64\u4e4b\u540e\u7684\u65e5\u5386\uff0c\u5219\u6709\u53ef\u80fd\u51fa\u73b0\u9519\u8bef\u3002\u56e0\u6b64\uff0c\u53ea\u5e94\u8be5\u5728\u7279\u6b8a\u7684\u60c5\u51b5\u4e0b\uff08\u6bd4\u5982\u6d4b\u8bd5\uff09\u8c03\u7528\u6b64\u65b9\u6cd5\uff0c\u4ee5\u83b7\u5f97\u4e00\u4e2a\u964d\u7ea7\u7684\u670d\u52a1\u3002 \"\"\" _dir = os . path . dirname ( __file__ ) file = os . path . join ( _dir , \"..\" , \"config\" , \"calendar.json\" ) with open ( file , \"r\" ) as f : data = json . load ( f ) for k , v in data . items (): setattr ( cls , k , np . array ( v )) shift ( moment , n , frame_type ) classmethod \u00b6 \u5c06\u6307\u5b9a\u7684moment\u79fb\u52a8N\u4e2a frame_type \u4f4d\u7f6e\u3002 \u5f53N\u4e3a\u8d1f\u6570\u65f6\uff0c\u610f\u5473\u7740\u5411\u524d\u79fb\u52a8\uff1b\u5f53N\u4e3a\u6b63\u6570\u65f6\uff0c\u610f\u5473\u7740\u5411\u540e\u79fb\u52a8\u3002\u5982\u679cn\u4e3a\u96f6\uff0c\u610f\u5473\u7740\u79fb\u52a8\u5230\u6700\u63a5\u8fd1 \u7684\u4e00\u4e2a\u5df2\u7ed3\u675f\u7684frame\u3002 \u5982\u679cmoment\u6ca1\u6709\u5bf9\u9f50\u5230frame_type\u5bf9\u5e94\u7684\u65f6\u95f4\uff0c\u5c06\u9996\u5148\u8fdb\u884c\u5bf9\u9f50\u3002 See also: day_shift week_shift month_shift Examples: >>> TimeFrame . shift ( datetime . date ( 2020 , 1 , 3 ), 1 , FrameType . DAY ) datetime . date ( 2020 , 1 , 6 ) >>> TimeFrame . shift ( datetime . datetime ( 2020 , 1 , 6 , 11 ), 1 , FrameType . MIN30 ) datetime . 
datetime ( 2020 , 1 , 6 , 11 , 30 ) Parameters: Name Type Description Default moment Union[Arrow, datetime.date, datetime.datetime] required n int required frame_type FrameType required Returns: Type Description Union[datetime.date, datetime.datetime] \u79fb\u4f4d\u540e\u7684Frame Source code in omicron/models/timeframe.py @classmethod def shift ( cls , moment : Union [ Arrow , datetime . date , datetime . datetime ], n : int , frame_type : FrameType , ) -> Union [ datetime . date , datetime . datetime ]: \"\"\"\u5c06\u6307\u5b9a\u7684moment\u79fb\u52a8N\u4e2a`frame_type`\u4f4d\u7f6e\u3002 \u5f53N\u4e3a\u8d1f\u6570\u65f6\uff0c\u610f\u5473\u7740\u5411\u524d\u79fb\u52a8\uff1b\u5f53N\u4e3a\u6b63\u6570\u65f6\uff0c\u610f\u5473\u7740\u5411\u540e\u79fb\u52a8\u3002\u5982\u679cn\u4e3a\u96f6\uff0c\u610f\u5473\u7740\u79fb\u52a8\u5230\u6700\u63a5\u8fd1 \u7684\u4e00\u4e2a\u5df2\u7ed3\u675f\u7684frame\u3002 \u5982\u679cmoment\u6ca1\u6709\u5bf9\u9f50\u5230frame_type\u5bf9\u5e94\u7684\u65f6\u95f4\uff0c\u5c06\u9996\u5148\u8fdb\u884c\u5bf9\u9f50\u3002 See also: - [day_shift][omicron.models.timeframe.TimeFrame.day_shift] - [week_shift][omicron.models.timeframe.TimeFrame.week_shift] - [month_shift][omicron.models.timeframe.TimeFrame.month_shift] Examples: >>> TimeFrame.shift(datetime.date(2020, 1, 3), 1, FrameType.DAY) datetime.date(2020, 1, 6) >>> TimeFrame.shift(datetime.datetime(2020, 1, 6, 11), 1, FrameType.MIN30) datetime.datetime(2020, 1, 6, 11, 30) Args: moment: n: frame_type: Returns: \u79fb\u4f4d\u540e\u7684Frame \"\"\" if frame_type == FrameType . DAY : return cls . day_shift ( moment , n ) elif frame_type == FrameType . WEEK : return cls . week_shift ( moment , n ) elif frame_type == FrameType . MONTH : return cls . month_shift ( moment , n ) elif frame_type in [ FrameType . MIN1 , FrameType . MIN5 , FrameType . MIN15 , FrameType . MIN30 , FrameType . MIN60 , ]: tm = moment . hour * 60 + moment . minute new_tick_pos = cls . ticks [ frame_type ] . index ( tm ) + n days = new_tick_pos // len ( cls . ticks [ frame_type ]) min_part = new_tick_pos % len ( cls . ticks [ frame_type ]) date_part = cls . day_shift ( moment . date (), days ) minutes = cls . ticks [ frame_type ][ min_part ] h , m = minutes // 60 , minutes % 60 return datetime . datetime ( date_part . year , date_part . month , date_part . day , h , m , tzinfo = moment . tzinfo , ) else : # pragma: no cover raise ValueError ( f \" { frame_type } is not supported.\" ) time2int ( tm ) classmethod \u00b6 \u5c06\u65f6\u95f4\u7c7b\u578b\u8f6c\u6362\u4e3a\u6574\u6570\u7c7b\u578b tm\u53ef\u4ee5\u662fArrow\u7c7b\u578b\uff0c\u4e5f\u53ef\u4ee5\u662fdatetime.datetime\u6216\u8005\u4efb\u4f55\u5176\u5b83\u7c7b\u578b\uff0c\u53ea\u8981\u5b83\u6709year,month...\u7b49 \u5c5e\u6027 Examples: >>> TimeFrame . time2int ( datetime . datetime ( 2020 , 5 , 1 , 15 )) 202005011500 Parameters: Name Type Description Default tm Union[datetime.datetime, Arrow] required Returns: Type Description int \u8f6c\u6362\u540e\u7684\u6574\u6570\uff0c\u6bd4\u59822020050115 Source code in omicron/models/timeframe.py @classmethod def time2int ( cls , tm : Union [ datetime . 
datetime , Arrow ]) -> int : \"\"\"\u5c06\u65f6\u95f4\u7c7b\u578b\u8f6c\u6362\u4e3a\u6574\u6570\u7c7b\u578b tm\u53ef\u4ee5\u662fArrow\u7c7b\u578b\uff0c\u4e5f\u53ef\u4ee5\u662fdatetime.datetime\u6216\u8005\u4efb\u4f55\u5176\u5b83\u7c7b\u578b\uff0c\u53ea\u8981\u5b83\u6709year,month...\u7b49 \u5c5e\u6027 Examples: >>> TimeFrame.time2int(datetime.datetime(2020, 5, 1, 15)) 202005011500 Args: tm: Returns: \u8f6c\u6362\u540e\u7684\u6574\u6570\uff0c\u6bd4\u59822020050115 \"\"\" return int ( f \" { tm . year : 04 }{ tm . month : 02 }{ tm . day : 02 }{ tm . hour : 02 }{ tm . minute : 02 } \" ) week_shift ( start , offset ) classmethod \u00b6 \u5bf9\u6307\u5b9a\u65e5\u671f\u6309\u5468\u7ebf\u5e27\u8fdb\u884c\u524d\u540e\u79fb\u4f4d\u64cd\u4f5c \u53c2\u8003 omicron.models.timeframe.TimeFrame.day_shift Examples: >>> TimeFrame . week_frames = np . array ([ 20200103 , 20200110 , 20200117 , 20200123 , 20200207 , 20200214 ]) >>> moment = arrow . get ( '2020-1-21' ) . date () >>> TimeFrame . week_shift ( moment , 1 ) datetime . date ( 2020 , 1 , 23 ) >>> TimeFrame . week_shift ( moment , 0 ) datetime . date ( 2020 , 1 , 17 ) >>> TimeFrame . week_shift ( moment , - 1 ) datetime . date ( 2020 , 1 , 10 ) Returns: Type Description datetime.date \u79fb\u4f4d\u540e\u7684\u65e5\u671f Source code in omicron/models/timeframe.py @classmethod def week_shift ( cls , start : datetime . date , offset : int ) -> datetime . date : \"\"\"\u5bf9\u6307\u5b9a\u65e5\u671f\u6309\u5468\u7ebf\u5e27\u8fdb\u884c\u524d\u540e\u79fb\u4f4d\u64cd\u4f5c \u53c2\u8003 [omicron.models.timeframe.TimeFrame.day_shift][] Examples: >>> TimeFrame.week_frames = np.array([20200103, 20200110, 20200117, 20200123,20200207, 20200214]) >>> moment = arrow.get('2020-1-21').date() >>> TimeFrame.week_shift(moment, 1) datetime.date(2020, 1, 23) >>> TimeFrame.week_shift(moment, 0) datetime.date(2020, 1, 17) >>> TimeFrame.week_shift(moment, -1) datetime.date(2020, 1, 10) Returns: \u79fb\u4f4d\u540e\u7684\u65e5\u671f \"\"\" start = cls . date2int ( start ) return cls . int2date ( ext . shift ( cls . week_frames , start , offset )) rendering: heading_level: 1","title":"timeframe"},{"location":"api/timeframe/#omicron.models.timeframe.TimeFrame","text":"Source code in omicron/models/timeframe.py class TimeFrame : minute_level_frames = [ FrameType . MIN1 , FrameType . MIN5 , FrameType . MIN15 , FrameType . MIN30 , FrameType . MIN60 , ] day_level_frames = [ FrameType . DAY , FrameType . WEEK , FrameType . MONTH , FrameType . QUARTER , FrameType . YEAR , ] ticks = { FrameType . MIN1 : [ i for i in itertools . chain ( range ( 571 , 691 ), range ( 781 , 901 ))], FrameType . MIN5 : [ i for i in itertools . chain ( range ( 575 , 695 , 5 ), range ( 785 , 905 , 5 )) ], FrameType . MIN15 : [ i for i in itertools . chain ( range ( 585 , 705 , 15 ), range ( 795 , 915 , 15 )) ], FrameType . MIN30 : [ int ( s [: 2 ]) * 60 + int ( s [ 2 :]) for s in [ \"1000\" , \"1030\" , \"1100\" , \"1130\" , \"1330\" , \"1400\" , \"1430\" , \"1500\" ] ], FrameType . 
MIN60 : [ int ( s [: 2 ]) * 60 + int ( s [ 2 :]) for s in [ \"1030\" , \"1130\" , \"1400\" , \"1500\" ] ], } day_frames = None week_frames = None month_frames = None quarter_frames = None year_frames = None @classmethod def service_degrade ( cls ): \"\"\"\u5f53cache\u4e2d\u4e0d\u5b58\u5728\u65e5\u5386\u65f6\uff0c\u542f\u7528\u968fomicron\u7248\u672c\u4e00\u8d77\u53d1\u884c\u65f6\u81ea\u5e26\u7684\u65e5\u5386\u3002 \u6ce8\u610f\uff1a\u968fomicron\u7248\u672c\u4e00\u8d77\u53d1\u884c\u65f6\u81ea\u5e26\u7684\u65e5\u5386\u5f88\u53ef\u80fd\u4e0d\u662f\u6700\u65b0\u7684\uff0c\u5e76\u4e14\u53ef\u80fd\u5305\u542b\u9519\u8bef\u3002\u6bd4\u5982\uff0c\u5b58\u5728\u8fd9\u6837\u7684\u60c5\u51b5\uff0c\u5728\u672c\u7248\u672c\u7684omicron\u53d1\u884c\u65f6\uff0c\u65e5\u5386\u66f4\u65b0\u5230\u4e862021\u5e7412\u670831\u65e5\uff0c\u5728\u8fd9\u4e4b\u524d\u7684\u65e5\u5386\u90fd\u662f\u51c6\u786e\u7684\uff0c\u4f46\u5728\u6b64\u4e4b\u540e\u7684\u65e5\u5386\uff0c\u5219\u6709\u53ef\u80fd\u51fa\u73b0\u9519\u8bef\u3002\u56e0\u6b64\uff0c\u53ea\u5e94\u8be5\u5728\u7279\u6b8a\u7684\u60c5\u51b5\u4e0b\uff08\u6bd4\u5982\u6d4b\u8bd5\uff09\u8c03\u7528\u6b64\u65b9\u6cd5\uff0c\u4ee5\u83b7\u5f97\u4e00\u4e2a\u964d\u7ea7\u7684\u670d\u52a1\u3002 \"\"\" _dir = os . path . dirname ( __file__ ) file = os . path . join ( _dir , \"..\" , \"config\" , \"calendar.json\" ) with open ( file , \"r\" ) as f : data = json . load ( f ) for k , v in data . items (): setattr ( cls , k , np . array ( v )) @classmethod async def _load_calendar ( cls ): \"\"\"\u4ece\u6570\u636e\u7f13\u5b58\u4e2d\u52a0\u8f7d\u66f4\u65b0\u65e5\u5386\"\"\" from omicron import cache names = [ \"day_frames\" , \"week_frames\" , \"month_frames\" , \"quarter_frames\" , \"year_frames\" , ] for name , frame_type in zip ( names , cls . day_level_frames ): key = f \"calendar: { frame_type . value } \" result = await cache . security . lrange ( key , 0 , - 1 ) if result is not None and len ( result ): frames = [ int ( x ) for x in result ] setattr ( cls , name , np . array ( frames )) else : # pragma: no cover raise DataNotReadyError ( f \"calendar data is not ready: { name } missed\" ) @classmethod async def init ( cls ): \"\"\"\u521d\u59cb\u5316\u65e5\u5386\"\"\" await cls . _load_calendar () @classmethod def int2time ( cls , tm : int ) -> datetime . datetime : \"\"\"\u5c06\u6574\u6570\u8868\u793a\u7684\u65f6\u95f4\u8f6c\u6362\u4e3a`datetime`\u7c7b\u578b\u8868\u793a examples: >>> TimeFrame.int2time(202005011500) datetime.datetime(2020, 5, 1, 15, 0) Args: tm: time in YYYYMMDDHHmm format Returns: \u8f6c\u6362\u540e\u7684\u65f6\u95f4 \"\"\" s = str ( tm ) # its 8 times faster than arrow.get() return datetime . datetime ( int ( s [: 4 ]), int ( s [ 4 : 6 ]), int ( s [ 6 : 8 ]), int ( s [ 8 : 10 ]), int ( s [ 10 : 12 ]) ) @classmethod def time2int ( cls , tm : Union [ datetime . datetime , Arrow ]) -> int : \"\"\"\u5c06\u65f6\u95f4\u7c7b\u578b\u8f6c\u6362\u4e3a\u6574\u6570\u7c7b\u578b tm\u53ef\u4ee5\u662fArrow\u7c7b\u578b\uff0c\u4e5f\u53ef\u4ee5\u662fdatetime.datetime\u6216\u8005\u4efb\u4f55\u5176\u5b83\u7c7b\u578b\uff0c\u53ea\u8981\u5b83\u6709year,month...\u7b49 \u5c5e\u6027 Examples: >>> TimeFrame.time2int(datetime.datetime(2020, 5, 1, 15)) 202005011500 Args: tm: Returns: \u8f6c\u6362\u540e\u7684\u6574\u6570\uff0c\u6bd4\u59822020050115 \"\"\" return int ( f \" { tm . year : 04 }{ tm . month : 02 }{ tm . day : 02 }{ tm . hour : 02 }{ tm . minute : 02 } \" ) @classmethod def date2int ( cls , d : Union [ datetime . datetime , datetime . 
date , Arrow ]) -> int : \"\"\"\u5c06\u65e5\u671f\u8f6c\u6362\u4e3a\u6574\u6570\u8868\u793a \u5728zillionare\u4e2d\uff0c\u5982\u679c\u8981\u5bf9\u65f6\u95f4\u548c\u65e5\u671f\u8fdb\u884c\u6301\u4e45\u5316\u64cd\u4f5c\uff0c\u6211\u4eec\u4e00\u822c\u5c06\u5176\u8f6c\u6362\u4e3aint\u7c7b\u578b Examples: >>> TimeFrame.date2int(datetime.date(2020,5,1)) 20200501 Args: d: date Returns: \u65e5\u671f\u7684\u6574\u6570\u8868\u793a\uff0c\u6bd4\u598220220211 \"\"\" return int ( f \" { d . year : 04 }{ d . month : 02 }{ d . day : 02 } \" ) @classmethod def int2date ( cls , d : Union [ int , str ]) -> datetime . date : \"\"\"\u5c06\u6570\u5b57\u8868\u793a\u7684\u65e5\u671f\u8f6c\u6362\u6210\u4e3a\u65e5\u671f\u683c\u5f0f Examples: >>> TimeFrame.int2date(20200501) datetime.date(2020, 5, 1) Args: d: YYYYMMDD\u8868\u793a\u7684\u65e5\u671f Returns: \u8f6c\u6362\u540e\u7684\u65e5\u671f \"\"\" s = str ( d ) # it's 8 times faster than arrow.get return datetime . date ( int ( s [: 4 ]), int ( s [ 4 : 6 ]), int ( s [ 6 :])) @classmethod def day_shift ( cls , start : datetime . date , offset : int ) -> datetime . date : \"\"\"\u5bf9\u6307\u5b9a\u65e5\u671f\u8fdb\u884c\u524d\u540e\u79fb\u4f4d\u64cd\u4f5c \u5982\u679c n == 0\uff0c\u5219\u8fd4\u56ded\u5bf9\u5e94\u7684\u4ea4\u6613\u65e5\uff08\u5982\u679c\u662f\u975e\u4ea4\u6613\u65e5\uff0c\u5219\u8fd4\u56de\u521a\u7ed3\u675f\u7684\u4e00\u4e2a\u4ea4\u6613\u65e5\uff09 \u5982\u679c n > 0\uff0c\u5219\u8fd4\u56ded\u5bf9\u5e94\u7684\u4ea4\u6613\u65e5\u540e\u7b2c n \u4e2a\u4ea4\u6613\u65e5 \u5982\u679c n < 0\uff0c\u5219\u8fd4\u56ded\u5bf9\u5e94\u7684\u4ea4\u6613\u65e5\u524d\u7b2c n \u4e2a\u4ea4\u6613\u65e5 Examples: >>> TimeFrame.day_frames = [20191212, 20191213, 20191216, 20191217,20191218, 20191219] >>> TimeFrame.day_shift(datetime.date(2019,12,13), 0) datetime.date(2019, 12, 13) >>> TimeFrame.day_shift(datetime.date(2019, 12, 15), 0) datetime.date(2019, 12, 13) >>> TimeFrame.day_shift(datetime.date(2019, 12, 15), 1) datetime.date(2019, 12, 16) >>> TimeFrame.day_shift(datetime.date(2019, 12, 13), 1) datetime.date(2019, 12, 16) Args: start: the origin day offset: days to shift, can be negative Returns: \u79fb\u4f4d\u540e\u7684\u65e5\u671f \"\"\" # accelerated from 0.12 to 0.07, per 10000 loop, type conversion time included start = cls . date2int ( start ) return cls . int2date ( ext . shift ( cls . day_frames , start , offset )) @classmethod def week_shift ( cls , start : datetime . date , offset : int ) -> datetime . date : \"\"\"\u5bf9\u6307\u5b9a\u65e5\u671f\u6309\u5468\u7ebf\u5e27\u8fdb\u884c\u524d\u540e\u79fb\u4f4d\u64cd\u4f5c \u53c2\u8003 [omicron.models.timeframe.TimeFrame.day_shift][] Examples: >>> TimeFrame.week_frames = np.array([20200103, 20200110, 20200117, 20200123,20200207, 20200214]) >>> moment = arrow.get('2020-1-21').date() >>> TimeFrame.week_shift(moment, 1) datetime.date(2020, 1, 23) >>> TimeFrame.week_shift(moment, 0) datetime.date(2020, 1, 17) >>> TimeFrame.week_shift(moment, -1) datetime.date(2020, 1, 10) Returns: \u79fb\u4f4d\u540e\u7684\u65e5\u671f \"\"\" start = cls . date2int ( start ) return cls . int2date ( ext . shift ( cls . week_frames , start , offset )) @classmethod def month_shift ( cls , start : datetime . date , offset : int ) -> datetime . 
date : \"\"\"\u6c42`start`\u6240\u5728\u7684\u6708\u79fb\u4f4d\u540e\u7684frame \u672c\u51fd\u6570\u9996\u5148\u5c06`start`\u5bf9\u9f50\uff0c\u7136\u540e\u8fdb\u884c\u79fb\u4f4d\u3002 Examples: >>> TimeFrame.month_frames = np.array([20150130, 20150227, 20150331, 20150430]) >>> TimeFrame.month_shift(arrow.get('2015-2-26').date(), 0) datetime.date(2015, 1, 30) >>> TimeFrame.month_shift(arrow.get('2015-2-27').date(), 0) datetime.date(2015, 2, 27) >>> TimeFrame.month_shift(arrow.get('2015-3-1').date(), 0) datetime.date(2015, 2, 27) >>> TimeFrame.month_shift(arrow.get('2015-3-1').date(), 1) datetime.date(2015, 3, 31) Returns: \u79fb\u4f4d\u540e\u7684\u65e5\u671f \"\"\" start = cls . date2int ( start ) return cls . int2date ( ext . shift ( cls . month_frames , start , offset )) @classmethod def get_ticks ( cls , frame_type : FrameType ) -> Union [ List , np . array ]: \"\"\"\u53d6\u6708\u7ebf\u3001\u5468\u7ebf\u3001\u65e5\u7ebf\u53ca\u5404\u5206\u949f\u7ebf\u5bf9\u5e94\u7684frame \u5bf9\u5206\u949f\u7ebf\uff0c\u8fd4\u56de\u503c\u4ec5\u5305\u542b\u65f6\u95f4\uff0c\u4e0d\u5305\u542b\u65e5\u671f\uff08\u5747\u4e3a\u6574\u6570\u8868\u793a\uff09 Examples: >>> TimeFrame.month_frames = np.array([20050131, 20050228, 20050331]) >>> TimeFrame.get_ticks(FrameType.MONTH)[:3] array([20050131, 20050228, 20050331]) Args: frame_type : [description] Raises: ValueError: [description] Returns: \u6708\u7ebf\u3001\u5468\u7ebf\u3001\u65e5\u7ebf\u53ca\u5404\u5206\u949f\u7ebf\u5bf9\u5e94\u7684frame \"\"\" if frame_type in cls . minute_level_frames : return cls . ticks [ frame_type ] if frame_type == FrameType . DAY : return cls . day_frames elif frame_type == FrameType . WEEK : return cls . week_frames elif frame_type == FrameType . MONTH : return cls . month_frames else : # pragma: no cover raise ValueError ( f \" { frame_type } not supported!\" ) @classmethod def shift ( cls , moment : Union [ Arrow , datetime . date , datetime . datetime ], n : int , frame_type : FrameType , ) -> Union [ datetime . date , datetime . datetime ]: \"\"\"\u5c06\u6307\u5b9a\u7684moment\u79fb\u52a8N\u4e2a`frame_type`\u4f4d\u7f6e\u3002 \u5f53N\u4e3a\u8d1f\u6570\u65f6\uff0c\u610f\u5473\u7740\u5411\u524d\u79fb\u52a8\uff1b\u5f53N\u4e3a\u6b63\u6570\u65f6\uff0c\u610f\u5473\u7740\u5411\u540e\u79fb\u52a8\u3002\u5982\u679cn\u4e3a\u96f6\uff0c\u610f\u5473\u7740\u79fb\u52a8\u5230\u6700\u63a5\u8fd1 \u7684\u4e00\u4e2a\u5df2\u7ed3\u675f\u7684frame\u3002 \u5982\u679cmoment\u6ca1\u6709\u5bf9\u9f50\u5230frame_type\u5bf9\u5e94\u7684\u65f6\u95f4\uff0c\u5c06\u9996\u5148\u8fdb\u884c\u5bf9\u9f50\u3002 See also: - [day_shift][omicron.models.timeframe.TimeFrame.day_shift] - [week_shift][omicron.models.timeframe.TimeFrame.week_shift] - [month_shift][omicron.models.timeframe.TimeFrame.month_shift] Examples: >>> TimeFrame.shift(datetime.date(2020, 1, 3), 1, FrameType.DAY) datetime.date(2020, 1, 6) >>> TimeFrame.shift(datetime.datetime(2020, 1, 6, 11), 1, FrameType.MIN30) datetime.datetime(2020, 1, 6, 11, 30) Args: moment: n: frame_type: Returns: \u79fb\u4f4d\u540e\u7684Frame \"\"\" if frame_type == FrameType . DAY : return cls . day_shift ( moment , n ) elif frame_type == FrameType . WEEK : return cls . week_shift ( moment , n ) elif frame_type == FrameType . MONTH : return cls . month_shift ( moment , n ) elif frame_type in [ FrameType . MIN1 , FrameType . MIN5 , FrameType . MIN15 , FrameType . MIN30 , FrameType . MIN60 , ]: tm = moment . hour * 60 + moment . minute new_tick_pos = cls . ticks [ frame_type ] . index ( tm ) + n days = new_tick_pos // len ( cls . 
ticks [ frame_type ]) min_part = new_tick_pos % len ( cls . ticks [ frame_type ]) date_part = cls . day_shift ( moment . date (), days ) minutes = cls . ticks [ frame_type ][ min_part ] h , m = minutes // 60 , minutes % 60 return datetime . datetime ( date_part . year , date_part . month , date_part . day , h , m , tzinfo = moment . tzinfo , ) else : # pragma: no cover raise ValueError ( f \" { frame_type } is not supported.\" ) @classmethod def count_day_frames ( cls , start : Union [ datetime . date , Arrow ], end : Union [ datetime . date , Arrow ] ) -> int : \"\"\"calc trade days between start and end in close-to-close way. if start == end, this will returns 1. Both start/end will be aligned to open trade day before calculation. Examples: >>> start = datetime.date(2019, 12, 21) >>> end = datetime.date(2019, 12, 21) >>> TimeFrame.day_frames = [20191219, 20191220, 20191223, 20191224, 20191225] >>> TimeFrame.count_day_frames(start, end) 1 >>> # non-trade days are removed >>> TimeFrame.day_frames = [20200121, 20200122, 20200123, 20200203, 20200204, 20200205] >>> start = datetime.date(2020, 1, 23) >>> end = datetime.date(2020, 2, 4) >>> TimeFrame.count_day_frames(start, end) 3 args: start: end: returns: count of days \"\"\" start = cls . date2int ( start ) end = cls . date2int ( end ) return int ( ext . count_between ( cls . day_frames , start , end )) @classmethod def count_week_frames ( cls , start : datetime . date , end : datetime . date ) -> int : \"\"\" calc trade weeks between start and end in close-to-close way. Both start and end will be aligned to open trade day before calculation. After that, if start == end, this will returns 1 for examples, please refer to [count_day_frames][omicron.models.timeframe.TimeFrame.count_day_frames] args: start: end: returns: count of weeks \"\"\" start = cls . date2int ( start ) end = cls . date2int ( end ) return int ( ext . count_between ( cls . week_frames , start , end )) @classmethod def count_month_frames ( cls , start : datetime . date , end : datetime . date ) -> int : \"\"\"calc trade months between start and end date in close-to-close way Both start and end will be aligned to open trade day before calculation. After that, if start == end, this will returns 1. For examples, please refer to [count_day_frames][omicron.models.timeframe.TimeFrame.count_day_frames] Args: start: end: Returns: months between start and end \"\"\" start = cls . date2int ( start ) end = cls . date2int ( end ) return int ( ext . count_between ( cls . month_frames , start , end )) @classmethod def count_quarter_frames ( cls , start : datetime . date , end : datetime . date ) -> int : \"\"\"calc trade quarters between start and end date in close-to-close way Both start and end will be aligned to open trade day before calculation. After that, if start == end, this will returns 1. For examples, please refer to [count_day_frames][omicron.models.timeframe.TimeFrame.count_day_frames] Args: start (datetime.date): [description] end (datetime.date): [description] Returns: quarters between start and end \"\"\" start = cls . date2int ( start ) end = cls . date2int ( end ) return int ( ext . count_between ( cls . quarter_frames , start , end )) @classmethod def count_year_frames ( cls , start : datetime . date , end : datetime . date ) -> int : \"\"\"calc trade years between start and end date in close-to-close way Both start and end will be aligned to open trade day before calculation. After that, if start == end, this will returns 1. 
For examples, please refer to [count_day_frames][omicron.models.timeframe.TimeFrame.count_day_frames] Args: start (datetime.date): [description] end (datetime.date): [description] Returns: years between start and end \"\"\" start = cls . date2int ( start ) end = cls . date2int ( end ) return int ( ext . count_between ( cls . year_frames , start , end )) @classmethod def count_frames ( cls , start : Union [ datetime . date , datetime . datetime , Arrow ], end : Union [ datetime . date , datetime . datetime , Arrow ], frame_type , ) -> int : \"\"\"\u8ba1\u7b97start\u4e0eend\u4e4b\u95f4\u6709\u591a\u5c11\u4e2a\u5468\u671f\u4e3aframe_type\u7684frames See also: - [count_day_frames][omicron.models.timeframe.TimeFrame.count_day_frames] - [count_week_frames][omicron.models.timeframe.TimeFrame.count_week_frames] - [count_month_frames][omicron.models.timeframe.TimeFrame.count_month_frames] Args: start : start frame end : end frame frame_type : the type of frame Raises: ValueError: \u5982\u679cframe_type\u4e0d\u652f\u6301\uff0c\u5219\u4f1a\u629b\u51fa\u6b64\u5f02\u5e38\u3002 Returns: \u4ecestart\u5230end\u7684\u5e27\u6570 \"\"\" if frame_type == FrameType . DAY : return cls . count_day_frames ( start , end ) elif frame_type == FrameType . WEEK : return cls . count_week_frames ( start , end ) elif frame_type == FrameType . MONTH : return cls . count_month_frames ( start , end ) elif frame_type == FrameType . QUARTER : return cls . count_quarter_frames ( start , end ) elif frame_type == FrameType . YEAR : return cls . count_year_frames ( start , end ) elif frame_type in [ FrameType . MIN1 , FrameType . MIN5 , FrameType . MIN15 , FrameType . MIN30 , FrameType . MIN60 , ]: tm_start = start . hour * 60 + start . minute tm_end = end . hour * 60 + end . minute days = cls . count_day_frames ( start . date (), end . date ()) - 1 tm_start_pos = cls . ticks [ frame_type ] . index ( tm_start ) tm_end_pos = cls . ticks [ frame_type ] . index ( tm_end ) min_bars = tm_end_pos - tm_start_pos + 1 return days * len ( cls . ticks [ frame_type ]) + min_bars else : # pragma: no cover raise ValueError ( f \" { frame_type } is not supported yet\" ) @classmethod def is_trade_day ( cls , dt : Union [ datetime . date , datetime . datetime , Arrow ]) -> bool : \"\"\"\u5224\u65ad`dt`\u662f\u5426\u4e3a\u4ea4\u6613\u65e5 Examples: >>> TimeFrame.is_trade_day(arrow.get('2020-1-1')) False Args: dt : Returns: bool \"\"\" return cls . date2int ( dt ) in cls . day_frames @classmethod def is_open_time ( cls , tm : Union [ datetime . datetime , Arrow ] = None ) -> bool : \"\"\"\u5224\u65ad`tm`\u6307\u5b9a\u7684\u65f6\u95f4\u662f\u5426\u5904\u5728\u4ea4\u6613\u65f6\u95f4\u6bb5\u3002 \u4ea4\u6613\u65f6\u95f4\u6bb5\u662f\u6307\u96c6\u5408\u7ade\u4ef7\u65f6\u95f4\u6bb5\u4e4b\u5916\u7684\u5f00\u76d8\u65f6\u95f4 Examples: >>> TimeFrame.day_frames = np.array([20200102, 20200103, 20200106, 20200107, 20200108]) >>> TimeFrame.is_open_time(arrow.get('2020-1-1 14:59').naive) False >>> TimeFrame.is_open_time(arrow.get('2020-1-3 14:59').naive) True Args: tm : [description]. Defaults to None. Returns: bool \"\"\" tm = tm or arrow . now () if not cls . is_trade_day ( tm ): return False tick = tm . hour * 60 + tm . minute return tick in cls . ticks [ FrameType . MIN1 ] @classmethod def is_opening_call_auction_time ( cls , tm : Union [ Arrow , datetime . datetime ] = None ) -> bool : \"\"\"\u5224\u65ad`tm`\u6307\u5b9a\u7684\u65f6\u95f4\u662f\u5426\u4e3a\u5f00\u76d8\u96c6\u5408\u7ade\u4ef7\u65f6\u95f4 Args: tm : [description]. Defaults to None. 
Returns: bool \"\"\" if tm is None : tm = cls . now () if not cls . is_trade_day ( tm ): return False minutes = tm . hour * 60 + tm . minute return 9 * 60 + 15 < minutes <= 9 * 60 + 25 @classmethod def is_closing_call_auction_time ( cls , tm : Union [ datetime . datetime , Arrow ] = None ) -> bool : \"\"\"\u5224\u65ad`tm`\u6307\u5b9a\u7684\u65f6\u95f4\u662f\u5426\u4e3a\u6536\u76d8\u96c6\u5408\u7ade\u4ef7\u65f6\u95f4 Fixme: \u6b64\u5904\u5b9e\u73b0\u6709\u8bef\uff0c\u6536\u76d8\u96c6\u5408\u7ade\u4ef7\u65f6\u95f4\u5e94\u8be5\u8fd8\u5305\u542b\u4e0a\u5348\u6536\u76d8\u65f6\u95f4 Args: tm : [description]. Defaults to None. Returns: bool \"\"\" tm = tm or cls . now () if not cls . is_trade_day ( tm ): return False minutes = tm . hour * 60 + tm . minute return 15 * 60 - 3 <= minutes < 15 * 60 @classmethod def floor ( cls , moment : Frame , frame_type : FrameType ) -> Frame : \"\"\"\u6c42`moment`\u5728\u6307\u5b9a\u7684`frame_type`\u4e2d\u7684\u4e0b\u754c \u6bd4\u5982\uff0c\u5982\u679c`moment`\u4e3a10:37\uff0c\u5219\u5f53`frame_type`\u4e3a30\u5206\u949f\u65f6\uff0c\u5bf9\u5e94\u7684\u4e0a\u754c\u4e3a10:00 Examples: >>> # \u5982\u679cmoment\u4e3a\u65e5\u671f\uff0c\u5219\u5f53\u6210\u5df2\u6536\u76d8\u5904\u7406 >>> TimeFrame.day_frames = np.array([20050104, 20050105, 20050106, 20050107, 20050110, 20050111]) >>> TimeFrame.floor(datetime.date(2005, 1, 7), FrameType.DAY) datetime.date(2005, 1, 7) >>> # moment\u6307\u5b9a\u7684\u65f6\u95f4\u8fd8\u672a\u6536\u76d8\uff0cfloor\u5230\u4e0a\u4e00\u4e2a\u4ea4\u6613\u65e5 >>> TimeFrame.floor(datetime.datetime(2005, 1, 7, 14, 59), FrameType.DAY) datetime.date(2005, 1, 6) >>> TimeFrame.floor(datetime.date(2005, 1, 13), FrameType.WEEK) datetime.date(2005, 1, 7) >>> TimeFrame.floor(datetime.date(2005,2, 27), FrameType.MONTH) datetime.date(2005, 1, 31) >>> TimeFrame.floor(datetime.datetime(2005,1,5,14,59), FrameType.MIN30) datetime.datetime(2005, 1, 5, 14, 30) >>> TimeFrame.floor(datetime.datetime(2005, 1, 5, 14, 59), FrameType.MIN1) datetime.datetime(2005, 1, 5, 14, 59) >>> TimeFrame.floor(arrow.get('2005-1-5 14:59').naive, FrameType.MIN1) datetime.datetime(2005, 1, 5, 14, 59) Args: moment: frame_type: Returns: `moment`\u5728\u6307\u5b9a\u7684`frame_type`\u4e2d\u7684\u4e0b\u754c \"\"\" if frame_type in cls . minute_level_frames : tm , day_offset = cls . minute_frames_floor ( cls . ticks [ frame_type ], moment . hour * 60 + moment . minute ) h , m = tm // 60 , tm % 60 if cls . day_shift ( moment , 0 ) < moment . date () or day_offset == - 1 : h = 15 m = 0 new_day = cls . day_shift ( moment , day_offset ) else : new_day = moment . date () return datetime . datetime ( new_day . year , new_day . month , new_day . day , h , m ) if type ( moment ) == datetime . date : moment = datetime . datetime ( moment . year , moment . month , moment . day , 15 ) # \u5982\u679c\u662f\u4ea4\u6613\u65e5\uff0c\u4f46\u8fd8\u672a\u6536\u76d8 if ( cls . date2int ( moment ) in cls . day_frames and moment . hour * 60 + moment . minute < 900 ): moment = cls . day_shift ( moment , - 1 ) day = cls . date2int ( moment ) if frame_type == FrameType . DAY : arr = cls . day_frames elif frame_type == FrameType . WEEK : arr = cls . week_frames elif frame_type == FrameType . MONTH : arr = cls . month_frames else : # pragma: no cover raise ValueError ( f \"frame type { frame_type } not supported.\" ) floored = ext . floor ( arr , day ) return cls . int2date ( floored ) @classmethod def last_min_frame ( cls , day : Union [ str , Arrow , datetime . 
date ], frame_type : FrameType ) -> Union [ datetime . date , datetime . datetime ]: \"\"\"\u83b7\u53d6`day`\u65e5\u5468\u671f\u4e3a`frame_type`\u7684\u7ed3\u675fframe\u3002 Example: >>> TimeFrame.last_min_frame(arrow.get('2020-1-5').date(), FrameType.MIN30) datetime.datetime(2020, 1, 3, 15, 0) Args: day: frame_type: Returns: `day`\u65e5\u5468\u671f\u4e3a`frame_type`\u7684\u7ed3\u675fframe \"\"\" if isinstance ( day , str ): day = cls . date2int ( arrow . get ( day ) . date ()) elif isinstance ( day , arrow . Arrow ) or isinstance ( day , datetime . datetime ): day = cls . date2int ( day . date ()) elif isinstance ( day , datetime . date ): day = cls . date2int ( day ) else : raise TypeError ( f \" { type ( day ) } is not supported.\" ) if frame_type in cls . minute_level_frames : last_close_day = cls . day_frames [ cls . day_frames <= day ][ - 1 ] day = cls . int2date ( last_close_day ) return datetime . datetime ( day . year , day . month , day . day , hour = 15 , minute = 0 ) else : # pragma: no cover raise ValueError ( f \" { frame_type } not supported\" ) @classmethod def frame_len ( cls , frame_type : FrameType ) -> int : \"\"\"\u8fd4\u56de\u4ee5\u5206\u949f\u4e3a\u5355\u4f4d\u7684frame\u957f\u5ea6\u3002 \u5bf9\u65e5\u7ebf\u4ee5\u4e0a\u7ea7\u522b\u6ca1\u6709\u610f\u4e49\uff0c\u4f46\u4f1a\u8fd4\u56de240 Examples: >>> TimeFrame.frame_len(FrameType.MIN5) 5 Args: frame_type: Returns: \u8fd4\u56de\u4ee5\u5206\u949f\u4e3a\u5355\u4f4d\u7684frame\u957f\u5ea6\u3002 \"\"\" if frame_type == FrameType . MIN1 : return 1 elif frame_type == FrameType . MIN5 : return 5 elif frame_type == FrameType . MIN15 : return 15 elif frame_type == FrameType . MIN30 : return 30 elif frame_type == FrameType . MIN60 : return 60 else : return 240 @classmethod def first_min_frame ( cls , day : Union [ str , Arrow , Frame ], frame_type : FrameType ) -> Union [ datetime . date , datetime . datetime ]: \"\"\"\u83b7\u53d6\u6307\u5b9a\u65e5\u671f\u7c7b\u578b\u4e3a`frame_type`\u7684`frame`\u3002 Examples: >>> TimeFrame.day_frames = np.array([20191227, 20191230, 20191231, 20200102, 20200103]) >>> TimeFrame.first_min_frame('2019-12-31', FrameType.MIN1) datetime.datetime(2019, 12, 31, 9, 31) Args: day: which day? frame_type: which frame_type? Returns: `day`\u5f53\u65e5\u7684\u7b2c\u4e00\u5e27 \"\"\" day = cls . date2int ( arrow . get ( day ) . date ()) if frame_type == FrameType . MIN1 : floor_day = cls . day_frames [ cls . day_frames <= day ][ - 1 ] day = cls . int2date ( floor_day ) return datetime . datetime ( day . year , day . month , day . day , hour = 9 , minute = 31 ) elif frame_type == FrameType . MIN5 : floor_day = cls . day_frames [ cls . day_frames <= day ][ - 1 ] day = cls . int2date ( floor_day ) return datetime . datetime ( day . year , day . month , day . day , hour = 9 , minute = 35 ) elif frame_type == FrameType . MIN15 : floor_day = cls . day_frames [ cls . day_frames <= day ][ - 1 ] day = cls . int2date ( floor_day ) return datetime . datetime ( day . year , day . month , day . day , hour = 9 , minute = 45 ) elif frame_type == FrameType . MIN30 : floor_day = cls . day_frames [ cls . day_frames <= day ][ - 1 ] day = cls . int2date ( floor_day ) return datetime . datetime ( day . year , day . month , day . day , hour = 10 ) elif frame_type == FrameType . MIN60 : floor_day = cls . day_frames [ cls . day_frames <= day ][ - 1 ] day = cls . int2date ( floor_day ) return datetime . datetime ( day . year , day . month , day . 
day , hour = 10 , minute = 30 ) else : # pragma: no cover raise ValueError ( f \" { frame_type } not supported\" ) @classmethod def get_frames ( cls , start : Frame , end : Frame , frame_type : FrameType ) -> List [ int ]: \"\"\"\u53d6[start, end]\u95f4\u6240\u6709\u7c7b\u578b\u4e3aframe_type\u7684frames \u8c03\u7528\u672c\u51fd\u6570\u524d\uff0c\u8bf7\u5148\u901a\u8fc7`floor`\u6216\u8005`ceiling`\u5c06\u65f6\u95f4\u5e27\u5bf9\u9f50\u5230`frame_type`\u7684\u8fb9\u754c\u503c Example: >>> start = arrow.get('2020-1-13 10:00').naive >>> end = arrow.get('2020-1-13 13:30').naive >>> TimeFrame.day_frames = np.array([20200109, 20200110, 20200113,20200114, 20200115, 20200116]) >>> TimeFrame.get_frames(start, end, FrameType.MIN30) [202001131000, 202001131030, 202001131100, 202001131130, 202001131330] Args: start: end: frame_type: Returns: frame list \"\"\" n = cls . count_frames ( start , end , frame_type ) return cls . get_frames_by_count ( end , n , frame_type ) @classmethod def get_frames_by_count ( cls , end : Arrow , n : int , frame_type : FrameType ) -> List [ int ]: \"\"\"\u53d6\u4ee5end\u4e3a\u7ed3\u675f\u70b9,\u5468\u671f\u4e3aframe_type\u7684n\u4e2aframe \u8c03\u7528\u524d\u8bf7\u5c06`end`\u5bf9\u9f50\u5230`frame_type`\u7684\u8fb9\u754c Examples: >>> end = arrow.get('2020-1-6 14:30').naive >>> TimeFrame.day_frames = np.array([20200102, 20200103,20200106, 20200107, 20200108, 20200109]) >>> TimeFrame.get_frames_by_count(end, 2, FrameType.MIN30) [202001061400, 202001061430] Args: end: n: frame_type: Returns: frame list \"\"\" if frame_type == FrameType . DAY : end = cls . date2int ( end ) pos = np . searchsorted ( cls . day_frames , end , side = \"right\" ) return cls . day_frames [ max ( 0 , pos - n ) : pos ] . tolist () elif frame_type == FrameType . WEEK : end = cls . date2int ( end ) pos = np . searchsorted ( cls . week_frames , end , side = \"right\" ) return cls . week_frames [ max ( 0 , pos - n ) : pos ] . tolist () elif frame_type == FrameType . MONTH : end = cls . date2int ( end ) pos = np . searchsorted ( cls . month_frames , end , side = \"right\" ) return cls . month_frames [ max ( 0 , pos - n ) : pos ] . tolist () elif frame_type in { FrameType . MIN1 , FrameType . MIN5 , FrameType . MIN15 , FrameType . MIN30 , FrameType . MIN60 , }: n_days = n // len ( cls . ticks [ frame_type ]) + 2 ticks = cls . ticks [ frame_type ] * n_days days = cls . get_frames_by_count ( end , n_days , FrameType . DAY ) days = np . repeat ( days , len ( cls . ticks [ frame_type ])) ticks = [ day . item () * 10000 + int ( tm / 60 ) * 100 + tm % 60 for day , tm in zip ( days , ticks ) ] # list index is much faster than ext.index_sorted when the arr is small pos = ticks . index ( cls . 
time2int ( end )) + 1 return ticks [ max ( 0 , pos - n ) : pos ] else : # pragma: no cover raise ValueError ( f \" { frame_type } not support yet\" ) @classmethod def ceiling ( cls , moment : Frame , frame_type : FrameType ) -> Frame : \"\"\"\u6c42`moment`\u6240\u5728\u7c7b\u578b\u4e3a`frame_type`\u5468\u671f\u7684\u4e0a\u754c \u6bd4\u5982`moment`\u4e3a14:59\u5206\uff0c\u5982\u679c`frame_type`\u4e3a30\u5206\u949f\uff0c\u5219\u5b83\u7684\u4e0a\u754c\u5e94\u8be5\u4e3a15:00 Example: >>> TimeFrame.day_frames = [20050104, 20050105, 20050106, 20050107] >>> TimeFrame.ceiling(datetime.date(2005, 1, 7), FrameType.DAY) datetime.date(2005, 1, 7) >>> TimeFrame.week_frames = [20050107, 20050114, 20050121, 20050128] >>> TimeFrame.ceiling(datetime.date(2005, 1, 4), FrameType.WEEK) datetime.date(2005, 1, 7) >>> TimeFrame.ceiling(datetime.date(2005,1,7), FrameType.WEEK) datetime.date(2005, 1, 7) >>> TimeFrame.month_frames = [20050131, 20050228] >>> TimeFrame.ceiling(datetime.date(2005,1 ,1), FrameType.MONTH) datetime.date(2005, 1, 31) >>> TimeFrame.ceiling(datetime.datetime(2005,1,5,14,59), FrameType.MIN30) datetime.datetime(2005, 1, 5, 15, 0) >>> TimeFrame.ceiling(datetime.datetime(2005, 1, 5, 14, 59), FrameType.MIN1) datetime.datetime(2005, 1, 5, 14, 59) >>> TimeFrame.ceiling(arrow.get('2005-1-5 14:59').naive, FrameType.MIN1) datetime.datetime(2005, 1, 5, 14, 59) Args: moment (datetime.datetime): [description] frame_type (FrameType): [description] Returns: `moment`\u6240\u5728\u7c7b\u578b\u4e3a`frame_type`\u5468\u671f\u7684\u4e0a\u754c \"\"\" if frame_type in cls . day_level_frames and type ( moment ) == datetime . datetime : moment = moment . date () floor = cls . floor ( moment , frame_type ) if floor == moment : return moment elif floor > moment : return floor else : return cls . shift ( floor , 1 , frame_type ) @classmethod def combine_time ( cls , date : datetime . date , hour : int , minute : int = 0 , second : int = 0 , microsecond : int = 0 , ) -> datetime . datetime : \"\"\"\u7528`date`\u6307\u5b9a\u7684\u65e5\u671f\u4e0e`hour`, `minute`, `second`\u7b49\u53c2\u6570\u4e00\u8d77\u5408\u6210\u65b0\u7684\u65f6\u95f4 Examples: >>> TimeFrame.combine_time(datetime.date(2020, 1, 1), 14, 30) datetime.datetime(2020, 1, 1, 14, 30) Args: date : [description] hour : [description] minute : [description]. Defaults to 0. second : [description]. Defaults to 0. microsecond : [description]. Defaults to 0. Returns: \u5408\u6210\u540e\u7684\u65f6\u95f4 \"\"\" return datetime . datetime ( date . year , date . month , date . day , hour , minute , second , microsecond ) @classmethod def replace_date ( cls , dtm : datetime . datetime , dt : datetime . date ) -> datetime . datetime : \"\"\"\u5c06`dtm`\u53d8\u91cf\u7684\u65e5\u671f\u66f4\u6362\u4e3a`dt`\u6307\u5b9a\u7684\u65e5\u671f Example: >>> TimeFrame.replace_date(arrow.get('2020-1-1 13:49').datetime, datetime.date(2019, 1,1)) datetime.datetime(2019, 1, 1, 13, 49) Args: dtm (datetime.datetime): [description] dt (datetime.date): [description] Returns: \u53d8\u6362\u540e\u7684\u65f6\u95f4 \"\"\" return datetime . datetime ( dt . year , dt . month , dt . day , dtm . hour , dtm . minute , dtm . second , dtm . microsecond ) @classmethod def resample_frames ( cls , trade_days : Iterable [ datetime . 
date ], frame_type : FrameType ) -> List [ int ]: \"\"\"\u5c06\u4ece\u884c\u60c5\u670d\u52a1\u5668\u83b7\u53d6\u7684\u4ea4\u6613\u65e5\u5386\u91cd\u91c7\u6837\uff0c\u751f\u6210\u5468\u5e27\u548c\u6708\u7ebf\u5e27 Args: trade_days (Iterable): [description] frame_type (FrameType): [description] Returns: List[int]: \u91cd\u91c7\u6837\u540e\u7684\u65e5\u671f\u5217\u8868\uff0c\u65e5\u671f\u7528\u6574\u6570\u8868\u793a \"\"\" if frame_type == FrameType . WEEK : weeks = [] last = trade_days [ 0 ] for cur in trade_days : if cur . weekday () < last . weekday () or ( cur - last ) . days >= 7 : weeks . append ( last ) last = cur if weeks [ - 1 ] < last : weeks . append ( last ) return weeks elif frame_type == FrameType . MONTH : months = [] last = trade_days [ 0 ] for cur in trade_days : if cur . day < last . day : months . append ( last ) last = cur months . append ( last ) return months elif frame_type == FrameType . QUARTER : quarters = [] last = trade_days [ 0 ] for cur in trade_days : if last . month % 3 == 0 : if cur . month > last . month or cur . year > last . year : quarters . append ( last ) last = cur quarters . append ( last ) return quarters elif frame_type == FrameType . YEAR : years = [] last = trade_days [ 0 ] for cur in trade_days : if cur . year > last . year : years . append ( last ) last = cur years . append ( last ) return years else : # pragma: no cover raise ValueError ( f \"Unsupported FrameType: { frame_type } \" ) @classmethod def minute_frames_floor ( cls , ticks , moment ) -> Tuple [ int , int ]: \"\"\" \u5bf9\u4e8e\u5206\u949f\u7ea7\u7684frame,\u8fd4\u56de\u5b83\u4eec\u4e0eframe\u523b\u5ea6\u5411\u4e0b\u5bf9\u9f50\u540e\u7684frame\u53ca\u65e5\u671f\u8fdb\u4f4d\u3002\u5982\u679c\u9700\u8981\u5bf9\u9f50\u5230\u4e0a\u4e00\u4e2a\u4ea4\u6613 \u65e5\uff0c\u5219\u8fdb\u4f4d\u4e3a-1\uff0c\u5426\u5219\u4e3a0. Examples: >>> ticks = [600, 630, 660, 690, 810, 840, 870, 900] >>> TimeFrame.minute_frames_floor(ticks, 545) (900, -1) >>> TimeFrame.minute_frames_floor(ticks, 600) (600, 0) >>> TimeFrame.minute_frames_floor(ticks, 605) (600, 0) >>> TimeFrame.minute_frames_floor(ticks, 899) (870, 0) >>> TimeFrame.minute_frames_floor(ticks, 900) (900, 0) >>> TimeFrame.minute_frames_floor(ticks, 905) (900, 0) Args: ticks (np.array or list): frames\u523b\u5ea6 moment (int): \u6574\u6570\u8868\u793a\u7684\u5206\u949f\u6570\uff0c\u6bd4\u5982900\u8868\u793a15\uff1a00 Returns: tuple, the first is the new moment, the second is carry-on \"\"\" if moment < ticks [ 0 ]: return ticks [ - 1 ], - 1 # \u2019right' \u76f8\u5f53\u4e8e ticks <= m index = np . searchsorted ( ticks , moment , side = \"right\" ) return ticks [ index - 1 ], 0 @classmethod async def save_calendar ( cls , trade_days ): # avoid circular import from omicron import cache for ft in [ FrameType . WEEK , FrameType . MONTH , FrameType . QUARTER , FrameType . YEAR ]: days = cls . resample_frames ( trade_days , ft ) frames = [ cls . date2int ( x ) for x in days ] key = f \"calendar: { ft . value } \" pl = cache . security . pipeline () pl . delete ( key ) pl . rpush ( key , * frames ) await pl . execute () frames = [ cls . date2int ( x ) for x in trade_days ] key = f \"calendar: { FrameType . DAY . value } \" pl = cache . security . pipeline () pl . delete ( key ) pl . rpush ( key , * frames ) await pl . execute () @classmethod async def remove_calendar ( cls ): # avoid circular import from omicron import cache for ft in cls . day_level_frames : key = f \"calendar: { ft . value } \" await cache . security . 
delete ( key ) @classmethod def is_bar_closed ( cls , frame : Frame , ft : FrameType ) -> bool : \"\"\"\u5224\u65ad`frame`\u6240\u4ee3\u8868\u7684bar\u662f\u5426\u5df2\u7ecf\u6536\u76d8\uff08\u7ed3\u675f\uff09 \u5982\u679c\u662f\u65e5\u7ebf\uff0cframe\u4e0d\u4e3a\u5f53\u5929\uff0c\u5219\u8ba4\u4e3a\u5df2\u6536\u76d8\uff1b\u6216\u8005\u5f53\u524d\u65f6\u95f4\u5728\u6536\u76d8\u65f6\u95f4\u4e4b\u540e\uff0c\u4e5f\u8ba4\u4e3a\u5df2\u6536\u76d8\u3002 \u5982\u679c\u662f\u5176\u5b83\u5468\u671f\uff0c\u5219\u53ea\u6709\u5f53frame\u6b63\u597d\u5728\u8fb9\u754c\u4e0a\uff0c\u624d\u8ba4\u4e3a\u662f\u5df2\u6536\u76d8\u3002\u8fd9\u91cc\u6709\u4e00\u4e2a\u5047\u8bbe\uff1a\u6211\u4eec\u4e0d\u4f1a\u5728\u5176\u5b83\u5468\u671f\u4e0a\uff0c\u5224\u65ad\u672a\u6765\u7684\u67d0\u4e2aframe\u662f\u5426\u5df2\u7ecf\u6536\u76d8\u3002 Args: frame : bar\u6240\u5904\u7684\u65f6\u95f4\uff0c\u5fc5\u987b\u5c0f\u4e8e\u5f53\u524d\u65f6\u95f4 ft: bar\u6240\u4ee3\u8868\u7684\u5e27\u7c7b\u578b Returns: bool: \u662f\u5426\u5df2\u7ecf\u6536\u76d8 \"\"\" floor = cls . floor ( frame , ft ) now = arrow . now () if ft == FrameType . DAY : return floor < now . date () or now . hour >= 15 else : return floor == frame @classmethod def get_frame_scope ( cls , frame : Frame , ft : FrameType ) -> Tuple [ Frame , Frame ]: # todo: \u51fd\u6570\u7684\u901a\u7528\u6027\u4e0d\u8db3\uff0c\u4f3c\u4e4e\u5e94\u8be5\u653e\u5728\u5177\u4f53\u7684\u4e1a\u52a1\u7c7b\u4e2d\u3002\u5982\u679c\u662f\u901a\u7528\u578b\u7684\u51fd\u6570\uff0c\u53c2\u6570\u4e0d\u5e94\u8be5\u5c40\u9650\u4e8e\u5468\u548c\u6708\u3002 \"\"\"\u5bf9\u4e8e\u7ed9\u5b9a\u7684\u65f6\u95f4\uff0c\u53d6\u6240\u5728\u5468\u7684\u7b2c\u4e00\u5929\u548c\u6700\u540e\u4e00\u5929\uff0c\u6240\u5728\u6708\u7684\u7b2c\u4e00\u5929\u548c\u6700\u540e\u4e00\u5929 Args: frame : \u6307\u5b9a\u7684\u65e5\u671f\uff0cdate\u5bf9\u8c61 ft: \u5e27\u7c7b\u578b\uff0c\u652f\u6301WEEK\u548cMONTH Returns: Tuple[Frame, Frame]: \u5468\u6216\u8005\u6708\u7684\u9996\u672b\u65e5\u671f\uff08date\u5bf9\u8c61\uff09 \"\"\" if frame is None : raise ValueError ( \"frame cannot be None\" ) if ft not in ( FrameType . WEEK , FrameType . MONTH ): raise ValueError ( f \"FrameType only supports WEEK and MONTH: { ft } \" ) if isinstance ( frame , datetime . datetime ): frame = frame . date () if frame < CALENDAR_START : raise ValueError ( f \"cannot be earlier than { CALENDAR_START } : { frame } \" ) # datetime.date(2021, 10, 8)\uff0c\u8fd9\u662f\u4e2a\u7279\u6b8a\u7684\u65e5\u671f if ft == FrameType . WEEK : if frame < datetime . date ( 2005 , 1 , 10 ): return datetime . date ( 2005 , 1 , 4 ), datetime . date ( 2005 , 1 , 7 ) if not cls . is_trade_day ( frame ): # \u975e\u4ea4\u6613\u65e5\u7684\u60c5\u51b5\uff0c\u76f4\u63a5\u56de\u9000\u4e00\u5929 week_day = cls . day_shift ( frame , 0 ) else : week_day = frame w1 = TimeFrame . floor ( week_day , FrameType . WEEK ) if w1 == week_day : # \u672c\u5468\u7684\u6700\u540e\u4e00\u4e2a\u4ea4\u6613\u65e5 week_end = w1 else : week_end = TimeFrame . week_shift ( week_day , 1 ) w0 = TimeFrame . week_shift ( week_end , - 1 ) week_start = TimeFrame . day_shift ( w0 , 1 ) return week_start , week_end if ft == FrameType . MONTH : if frame <= datetime . date ( 2005 , 1 , 31 ): return datetime . date ( 2005 , 1 , 4 ), datetime . date ( 2005 , 1 , 31 ) month_start = frame . replace ( day = 1 ) if not cls . is_trade_day ( month_start ): # \u975e\u4ea4\u6613\u65e5\u7684\u60c5\u51b5\uff0c\u76f4\u63a5\u52a01 month_start = cls . day_shift ( month_start , 1 ) month_end = TimeFrame . 
month_shift ( month_start , 1 ) return month_start , month_end @classmethod def get_previous_trade_day ( cls , now : datetime . date ): \"\"\"\u83b7\u53d6\u4e0a\u4e00\u4e2a\u4ea4\u6613\u65e5 \u5982\u679c\u5f53\u5929\u662f\u5468\u516d\u6216\u8005\u5468\u65e5\uff0c\u8fd4\u56de\u5468\u4e94\uff08\u4ea4\u6613\u65e5\uff09\uff0c\u5982\u679c\u5f53\u5929\u662f\u5468\u4e00\uff0c\u8fd4\u56de\u5468\u4e94\uff0c\u5982\u679c\u5f53\u5929\u662f\u5468\u4e94\uff0c\u8fd4\u56de\u5468\u56db Args: now : \u6307\u5b9a\u7684\u65e5\u671f\uff0cdate\u5bf9\u8c61 Returns: datetime.date: \u4e0a\u4e00\u4e2a\u4ea4\u6613\u65e5 \"\"\" if now == datetime . date ( 2005 , 1 , 4 ): return now if TimeFrame . is_trade_day ( now ): pre_trade_day = TimeFrame . day_shift ( now , - 1 ) else : pre_trade_day = TimeFrame . day_shift ( now , 0 ) return pre_trade_day","title":"TimeFrame"},{"location":"api/timeframe/#omicron.models.timeframe.TimeFrame.ceiling","text":"\u6c42 moment \u6240\u5728\u7c7b\u578b\u4e3a frame_type \u5468\u671f\u7684\u4e0a\u754c \u6bd4\u5982 moment \u4e3a14:59\u5206\uff0c\u5982\u679c frame_type \u4e3a30\u5206\u949f\uff0c\u5219\u5b83\u7684\u4e0a\u754c\u5e94\u8be5\u4e3a15:00 Examples: >>> TimeFrame . day_frames = [ 20050104 , 20050105 , 20050106 , 20050107 ] >>> TimeFrame . ceiling ( datetime . date ( 2005 , 1 , 7 ), FrameType . DAY ) datetime . date ( 2005 , 1 , 7 ) >>> TimeFrame . week_frames = [ 20050107 , 20050114 , 20050121 , 20050128 ] >>> TimeFrame . ceiling ( datetime . date ( 2005 , 1 , 4 ), FrameType . WEEK ) datetime . date ( 2005 , 1 , 7 ) >>> TimeFrame . ceiling ( datetime . date ( 2005 , 1 , 7 ), FrameType . WEEK ) datetime . date ( 2005 , 1 , 7 ) >>> TimeFrame . month_frames = [ 20050131 , 20050228 ] >>> TimeFrame . ceiling ( datetime . date ( 2005 , 1 , 1 ), FrameType . MONTH ) datetime . date ( 2005 , 1 , 31 ) >>> TimeFrame . ceiling ( datetime . datetime ( 2005 , 1 , 5 , 14 , 59 ), FrameType . MIN30 ) datetime . datetime ( 2005 , 1 , 5 , 15 , 0 ) >>> TimeFrame . ceiling ( datetime . datetime ( 2005 , 1 , 5 , 14 , 59 ), FrameType . MIN1 ) datetime . datetime ( 2005 , 1 , 5 , 14 , 59 ) >>> TimeFrame . ceiling ( arrow . get ( '2005-1-5 14:59' ) . naive , FrameType . MIN1 ) datetime . 
datetime ( 2005 , 1 , 5 , 14 , 59 ) Parameters: Name Type Description Default moment datetime.datetime [description] required frame_type FrameType [description] required Returns: Type Description Frame moment \u6240\u5728\u7c7b\u578b\u4e3a frame_type \u5468\u671f\u7684\u4e0a\u754c Source code in omicron/models/timeframe.py @classmethod def ceiling ( cls , moment : Frame , frame_type : FrameType ) -> Frame : \"\"\"\u6c42`moment`\u6240\u5728\u7c7b\u578b\u4e3a`frame_type`\u5468\u671f\u7684\u4e0a\u754c \u6bd4\u5982`moment`\u4e3a14:59\u5206\uff0c\u5982\u679c`frame_type`\u4e3a30\u5206\u949f\uff0c\u5219\u5b83\u7684\u4e0a\u754c\u5e94\u8be5\u4e3a15:00 Example: >>> TimeFrame.day_frames = [20050104, 20050105, 20050106, 20050107] >>> TimeFrame.ceiling(datetime.date(2005, 1, 7), FrameType.DAY) datetime.date(2005, 1, 7) >>> TimeFrame.week_frames = [20050107, 20050114, 20050121, 20050128] >>> TimeFrame.ceiling(datetime.date(2005, 1, 4), FrameType.WEEK) datetime.date(2005, 1, 7) >>> TimeFrame.ceiling(datetime.date(2005,1,7), FrameType.WEEK) datetime.date(2005, 1, 7) >>> TimeFrame.month_frames = [20050131, 20050228] >>> TimeFrame.ceiling(datetime.date(2005,1 ,1), FrameType.MONTH) datetime.date(2005, 1, 31) >>> TimeFrame.ceiling(datetime.datetime(2005,1,5,14,59), FrameType.MIN30) datetime.datetime(2005, 1, 5, 15, 0) >>> TimeFrame.ceiling(datetime.datetime(2005, 1, 5, 14, 59), FrameType.MIN1) datetime.datetime(2005, 1, 5, 14, 59) >>> TimeFrame.ceiling(arrow.get('2005-1-5 14:59').naive, FrameType.MIN1) datetime.datetime(2005, 1, 5, 14, 59) Args: moment (datetime.datetime): [description] frame_type (FrameType): [description] Returns: `moment`\u6240\u5728\u7c7b\u578b\u4e3a`frame_type`\u5468\u671f\u7684\u4e0a\u754c \"\"\" if frame_type in cls . day_level_frames and type ( moment ) == datetime . datetime : moment = moment . date () floor = cls . floor ( moment , frame_type ) if floor == moment : return moment elif floor > moment : return floor else : return cls . shift ( floor , 1 , frame_type )","title":"ceiling()"},{"location":"api/timeframe/#omicron.models.timeframe.TimeFrame.combine_time","text":"\u7528 date \u6307\u5b9a\u7684\u65e5\u671f\u4e0e hour , minute , second \u7b49\u53c2\u6570\u4e00\u8d77\u5408\u6210\u65b0\u7684\u65f6\u95f4 Examples: >>> TimeFrame . combine_time ( datetime . date ( 2020 , 1 , 1 ), 14 , 30 ) datetime . datetime ( 2020 , 1 , 1 , 14 , 30 ) Parameters: Name Type Description Default date [description] required hour [description] required minute [description]. Defaults to 0. 0 second [description]. Defaults to 0. 0 microsecond [description]. Defaults to 0. 0 Returns: Type Description datetime.datetime \u5408\u6210\u540e\u7684\u65f6\u95f4 Source code in omicron/models/timeframe.py @classmethod def combine_time ( cls , date : datetime . date , hour : int , minute : int = 0 , second : int = 0 , microsecond : int = 0 , ) -> datetime . datetime : \"\"\"\u7528`date`\u6307\u5b9a\u7684\u65e5\u671f\u4e0e`hour`, `minute`, `second`\u7b49\u53c2\u6570\u4e00\u8d77\u5408\u6210\u65b0\u7684\u65f6\u95f4 Examples: >>> TimeFrame.combine_time(datetime.date(2020, 1, 1), 14, 30) datetime.datetime(2020, 1, 1, 14, 30) Args: date : [description] hour : [description] minute : [description]. Defaults to 0. second : [description]. Defaults to 0. microsecond : [description]. Defaults to 0. Returns: \u5408\u6210\u540e\u7684\u65f6\u95f4 \"\"\" return datetime . datetime ( date . year , date . month , date . 
day , hour , minute , second , microsecond )","title":"combine_time()"},{"location":"api/timeframe/#omicron.models.timeframe.TimeFrame.count_day_frames","text":"calc trade days between start and end in close-to-close way. if start == end, this will returns 1. Both start/end will be aligned to open trade day before calculation. Examples: >>> start = datetime . date ( 2019 , 12 , 21 ) >>> end = datetime . date ( 2019 , 12 , 21 ) >>> TimeFrame . day_frames = [ 20191219 , 20191220 , 20191223 , 20191224 , 20191225 ] >>> TimeFrame . count_day_frames ( start , end ) 1 >>> # non-trade days are removed >>> TimeFrame . day_frames = [ 20200121 , 20200122 , 20200123 , 20200203 , 20200204 , 20200205 ] >>> start = datetime . date ( 2020 , 1 , 23 ) >>> end = datetime . date ( 2020 , 2 , 4 ) >>> TimeFrame . count_day_frames ( start , end ) 3 Parameters: Name Type Description Default start Union[datetime.date, Arrow] required end Union[datetime.date, Arrow] required Returns: Type Description int count of days Source code in omicron/models/timeframe.py @classmethod def count_day_frames ( cls , start : Union [ datetime . date , Arrow ], end : Union [ datetime . date , Arrow ] ) -> int : \"\"\"calc trade days between start and end in close-to-close way. if start == end, this will returns 1. Both start/end will be aligned to open trade day before calculation. Examples: >>> start = datetime.date(2019, 12, 21) >>> end = datetime.date(2019, 12, 21) >>> TimeFrame.day_frames = [20191219, 20191220, 20191223, 20191224, 20191225] >>> TimeFrame.count_day_frames(start, end) 1 >>> # non-trade days are removed >>> TimeFrame.day_frames = [20200121, 20200122, 20200123, 20200203, 20200204, 20200205] >>> start = datetime.date(2020, 1, 23) >>> end = datetime.date(2020, 2, 4) >>> TimeFrame.count_day_frames(start, end) 3 args: start: end: returns: count of days \"\"\" start = cls . date2int ( start ) end = cls . date2int ( end ) return int ( ext . count_between ( cls . day_frames , start , end ))","title":"count_day_frames()"},{"location":"api/timeframe/#omicron.models.timeframe.TimeFrame.count_frames","text":"\u8ba1\u7b97start\u4e0eend\u4e4b\u95f4\u6709\u591a\u5c11\u4e2a\u5468\u671f\u4e3aframe_type\u7684frames See also: count_day_frames count_week_frames count_month_frames Parameters: Name Type Description Default start start frame required end end frame required frame_type the type of frame required Exceptions: Type Description ValueError \u5982\u679cframe_type\u4e0d\u652f\u6301\uff0c\u5219\u4f1a\u629b\u51fa\u6b64\u5f02\u5e38\u3002 Returns: Type Description int \u4ecestart\u5230end\u7684\u5e27\u6570 Source code in omicron/models/timeframe.py @classmethod def count_frames ( cls , start : Union [ datetime . date , datetime . datetime , Arrow ], end : Union [ datetime . date , datetime . datetime , Arrow ], frame_type , ) -> int : \"\"\"\u8ba1\u7b97start\u4e0eend\u4e4b\u95f4\u6709\u591a\u5c11\u4e2a\u5468\u671f\u4e3aframe_type\u7684frames See also: - [count_day_frames][omicron.models.timeframe.TimeFrame.count_day_frames] - [count_week_frames][omicron.models.timeframe.TimeFrame.count_week_frames] - [count_month_frames][omicron.models.timeframe.TimeFrame.count_month_frames] Args: start : start frame end : end frame frame_type : the type of frame Raises: ValueError: \u5982\u679cframe_type\u4e0d\u652f\u6301\uff0c\u5219\u4f1a\u629b\u51fa\u6b64\u5f02\u5e38\u3002 Returns: \u4ecestart\u5230end\u7684\u5e27\u6570 \"\"\" if frame_type == FrameType . DAY : return cls . count_day_frames ( start , end ) elif frame_type == FrameType . 
WEEK : return cls . count_week_frames ( start , end ) elif frame_type == FrameType . MONTH : return cls . count_month_frames ( start , end ) elif frame_type == FrameType . QUARTER : return cls . count_quarter_frames ( start , end ) elif frame_type == FrameType . YEAR : return cls . count_year_frames ( start , end ) elif frame_type in [ FrameType . MIN1 , FrameType . MIN5 , FrameType . MIN15 , FrameType . MIN30 , FrameType . MIN60 , ]: tm_start = start . hour * 60 + start . minute tm_end = end . hour * 60 + end . minute days = cls . count_day_frames ( start . date (), end . date ()) - 1 tm_start_pos = cls . ticks [ frame_type ] . index ( tm_start ) tm_end_pos = cls . ticks [ frame_type ] . index ( tm_end ) min_bars = tm_end_pos - tm_start_pos + 1 return days * len ( cls . ticks [ frame_type ]) + min_bars else : # pragma: no cover raise ValueError ( f \" { frame_type } is not supported yet\" )","title":"count_frames()"},{"location":"api/timeframe/#omicron.models.timeframe.TimeFrame.count_month_frames","text":"calc trade months between start and end date in close-to-close way Both start and end will be aligned to open trade day before calculation. After that, if start == end, this will returns 1. For examples, please refer to count_day_frames Parameters: Name Type Description Default start datetime.date required end datetime.date required Returns: Type Description int months between start and end Source code in omicron/models/timeframe.py @classmethod def count_month_frames ( cls , start : datetime . date , end : datetime . date ) -> int : \"\"\"calc trade months between start and end date in close-to-close way Both start and end will be aligned to open trade day before calculation. After that, if start == end, this will returns 1. For examples, please refer to [count_day_frames][omicron.models.timeframe.TimeFrame.count_day_frames] Args: start: end: Returns: months between start and end \"\"\" start = cls . date2int ( start ) end = cls . date2int ( end ) return int ( ext . count_between ( cls . month_frames , start , end ))","title":"count_month_frames()"},{"location":"api/timeframe/#omicron.models.timeframe.TimeFrame.count_quarter_frames","text":"calc trade quarters between start and end date in close-to-close way Both start and end will be aligned to open trade day before calculation. After that, if start == end, this will returns 1. For examples, please refer to count_day_frames Parameters: Name Type Description Default start datetime.date [description] required end datetime.date [description] required Returns: Type Description int quarters between start and end Source code in omicron/models/timeframe.py @classmethod def count_quarter_frames ( cls , start : datetime . date , end : datetime . date ) -> int : \"\"\"calc trade quarters between start and end date in close-to-close way Both start and end will be aligned to open trade day before calculation. After that, if start == end, this will returns 1. For examples, please refer to [count_day_frames][omicron.models.timeframe.TimeFrame.count_day_frames] Args: start (datetime.date): [description] end (datetime.date): [description] Returns: quarters between start and end \"\"\" start = cls . date2int ( start ) end = cls . date2int ( end ) return int ( ext . count_between ( cls . quarter_frames , start , end ))","title":"count_quarter_frames()"},{"location":"api/timeframe/#omicron.models.timeframe.TimeFrame.count_week_frames","text":"calc trade weeks between start and end in close-to-close way. 
Both start and end will be aligned to open trade day before calculation. After that, if start == end, this will returns 1 for examples, please refer to count_day_frames Parameters: Name Type Description Default start datetime.date required end datetime.date required Returns: Type Description int count of weeks Source code in omicron/models/timeframe.py @classmethod def count_week_frames ( cls , start : datetime . date , end : datetime . date ) -> int : \"\"\" calc trade weeks between start and end in close-to-close way. Both start and end will be aligned to open trade day before calculation. After that, if start == end, this will returns 1 for examples, please refer to [count_day_frames][omicron.models.timeframe.TimeFrame.count_day_frames] args: start: end: returns: count of weeks \"\"\" start = cls . date2int ( start ) end = cls . date2int ( end ) return int ( ext . count_between ( cls . week_frames , start , end ))","title":"count_week_frames()"},{"location":"api/timeframe/#omicron.models.timeframe.TimeFrame.count_year_frames","text":"calc trade years between start and end date in close-to-close way Both start and end will be aligned to open trade day before calculation. After that, if start == end, this will returns 1. For examples, please refer to count_day_frames Parameters: Name Type Description Default start datetime.date [description] required end datetime.date [description] required Returns: Type Description int years between start and end Source code in omicron/models/timeframe.py @classmethod def count_year_frames ( cls , start : datetime . date , end : datetime . date ) -> int : \"\"\"calc trade years between start and end date in close-to-close way Both start and end will be aligned to open trade day before calculation. After that, if start == end, this will returns 1. For examples, please refer to [count_day_frames][omicron.models.timeframe.TimeFrame.count_day_frames] Args: start (datetime.date): [description] end (datetime.date): [description] Returns: years between start and end \"\"\" start = cls . date2int ( start ) end = cls . date2int ( end ) return int ( ext . count_between ( cls . year_frames , start , end ))","title":"count_year_frames()"},{"location":"api/timeframe/#omicron.models.timeframe.TimeFrame.date2int","text":"\u5c06\u65e5\u671f\u8f6c\u6362\u4e3a\u6574\u6570\u8868\u793a \u5728zillionare\u4e2d\uff0c\u5982\u679c\u8981\u5bf9\u65f6\u95f4\u548c\u65e5\u671f\u8fdb\u884c\u6301\u4e45\u5316\u64cd\u4f5c\uff0c\u6211\u4eec\u4e00\u822c\u5c06\u5176\u8f6c\u6362\u4e3aint\u7c7b\u578b Examples: >>> TimeFrame . date2int ( datetime . date ( 2020 , 5 , 1 )) 20200501 Parameters: Name Type Description Default d Union[datetime.datetime, datetime.date, Arrow] date required Returns: Type Description int \u65e5\u671f\u7684\u6574\u6570\u8868\u793a\uff0c\u6bd4\u598220220211 Source code in omicron/models/timeframe.py @classmethod def date2int ( cls , d : Union [ datetime . datetime , datetime . date , Arrow ]) -> int : \"\"\"\u5c06\u65e5\u671f\u8f6c\u6362\u4e3a\u6574\u6570\u8868\u793a \u5728zillionare\u4e2d\uff0c\u5982\u679c\u8981\u5bf9\u65f6\u95f4\u548c\u65e5\u671f\u8fdb\u884c\u6301\u4e45\u5316\u64cd\u4f5c\uff0c\u6211\u4eec\u4e00\u822c\u5c06\u5176\u8f6c\u6362\u4e3aint\u7c7b\u578b Examples: >>> TimeFrame.date2int(datetime.date(2020,5,1)) 20200501 Args: d: date Returns: \u65e5\u671f\u7684\u6574\u6570\u8868\u793a\uff0c\u6bd4\u598220220211 \"\"\" return int ( f \" { d . year : 04 }{ d . month : 02 }{ d . 
day : 02 } \" )","title":"date2int()"},{"location":"api/timeframe/#omicron.models.timeframe.TimeFrame.day_shift","text":"\u5bf9\u6307\u5b9a\u65e5\u671f\u8fdb\u884c\u524d\u540e\u79fb\u4f4d\u64cd\u4f5c \u5982\u679c n == 0\uff0c\u5219\u8fd4\u56ded\u5bf9\u5e94\u7684\u4ea4\u6613\u65e5\uff08\u5982\u679c\u662f\u975e\u4ea4\u6613\u65e5\uff0c\u5219\u8fd4\u56de\u521a\u7ed3\u675f\u7684\u4e00\u4e2a\u4ea4\u6613\u65e5\uff09 \u5982\u679c n > 0\uff0c\u5219\u8fd4\u56ded\u5bf9\u5e94\u7684\u4ea4\u6613\u65e5\u540e\u7b2c n \u4e2a\u4ea4\u6613\u65e5 \u5982\u679c n < 0\uff0c\u5219\u8fd4\u56ded\u5bf9\u5e94\u7684\u4ea4\u6613\u65e5\u524d\u7b2c n \u4e2a\u4ea4\u6613\u65e5 Examples: >>> TimeFrame . day_frames = [ 20191212 , 20191213 , 20191216 , 20191217 , 20191218 , 20191219 ] >>> TimeFrame . day_shift ( datetime . date ( 2019 , 12 , 13 ), 0 ) datetime . date ( 2019 , 12 , 13 ) >>> TimeFrame . day_shift ( datetime . date ( 2019 , 12 , 15 ), 0 ) datetime . date ( 2019 , 12 , 13 ) >>> TimeFrame . day_shift ( datetime . date ( 2019 , 12 , 15 ), 1 ) datetime . date ( 2019 , 12 , 16 ) >>> TimeFrame . day_shift ( datetime . date ( 2019 , 12 , 13 ), 1 ) datetime . date ( 2019 , 12 , 16 ) Parameters: Name Type Description Default start datetime.date the origin day required offset int days to shift, can be negative required Returns: Type Description datetime.date \u79fb\u4f4d\u540e\u7684\u65e5\u671f Source code in omicron/models/timeframe.py @classmethod def day_shift ( cls , start : datetime . date , offset : int ) -> datetime . date : \"\"\"\u5bf9\u6307\u5b9a\u65e5\u671f\u8fdb\u884c\u524d\u540e\u79fb\u4f4d\u64cd\u4f5c \u5982\u679c n == 0\uff0c\u5219\u8fd4\u56ded\u5bf9\u5e94\u7684\u4ea4\u6613\u65e5\uff08\u5982\u679c\u662f\u975e\u4ea4\u6613\u65e5\uff0c\u5219\u8fd4\u56de\u521a\u7ed3\u675f\u7684\u4e00\u4e2a\u4ea4\u6613\u65e5\uff09 \u5982\u679c n > 0\uff0c\u5219\u8fd4\u56ded\u5bf9\u5e94\u7684\u4ea4\u6613\u65e5\u540e\u7b2c n \u4e2a\u4ea4\u6613\u65e5 \u5982\u679c n < 0\uff0c\u5219\u8fd4\u56ded\u5bf9\u5e94\u7684\u4ea4\u6613\u65e5\u524d\u7b2c n \u4e2a\u4ea4\u6613\u65e5 Examples: >>> TimeFrame.day_frames = [20191212, 20191213, 20191216, 20191217,20191218, 20191219] >>> TimeFrame.day_shift(datetime.date(2019,12,13), 0) datetime.date(2019, 12, 13) >>> TimeFrame.day_shift(datetime.date(2019, 12, 15), 0) datetime.date(2019, 12, 13) >>> TimeFrame.day_shift(datetime.date(2019, 12, 15), 1) datetime.date(2019, 12, 16) >>> TimeFrame.day_shift(datetime.date(2019, 12, 13), 1) datetime.date(2019, 12, 16) Args: start: the origin day offset: days to shift, can be negative Returns: \u79fb\u4f4d\u540e\u7684\u65e5\u671f \"\"\" # accelerated from 0.12 to 0.07, per 10000 loop, type conversion time included start = cls . date2int ( start ) return cls . int2date ( ext . shift ( cls . day_frames , start , offset ))","title":"day_shift()"},{"location":"api/timeframe/#omicron.models.timeframe.TimeFrame.first_min_frame","text":"\u83b7\u53d6\u6307\u5b9a\u65e5\u671f\u7c7b\u578b\u4e3a frame_type \u7684 frame \u3002 Examples: >>> TimeFrame . day_frames = np . array ([ 20191227 , 20191230 , 20191231 , 20200102 , 20200103 ]) >>> TimeFrame . first_min_frame ( '2019-12-31' , FrameType . MIN1 ) datetime . datetime ( 2019 , 12 , 31 , 9 , 31 ) Parameters: Name Type Description Default day Union[str, Arrow, Frame] which day? required frame_type FrameType which frame_type? 
required Returns: Type Description Union[datetime.date, datetime.datetime] day \u5f53\u65e5\u7684\u7b2c\u4e00\u5e27 Source code in omicron/models/timeframe.py @classmethod def first_min_frame ( cls , day : Union [ str , Arrow , Frame ], frame_type : FrameType ) -> Union [ datetime . date , datetime . datetime ]: \"\"\"\u83b7\u53d6\u6307\u5b9a\u65e5\u671f\u7c7b\u578b\u4e3a`frame_type`\u7684`frame`\u3002 Examples: >>> TimeFrame.day_frames = np.array([20191227, 20191230, 20191231, 20200102, 20200103]) >>> TimeFrame.first_min_frame('2019-12-31', FrameType.MIN1) datetime.datetime(2019, 12, 31, 9, 31) Args: day: which day? frame_type: which frame_type? Returns: `day`\u5f53\u65e5\u7684\u7b2c\u4e00\u5e27 \"\"\" day = cls . date2int ( arrow . get ( day ) . date ()) if frame_type == FrameType . MIN1 : floor_day = cls . day_frames [ cls . day_frames <= day ][ - 1 ] day = cls . int2date ( floor_day ) return datetime . datetime ( day . year , day . month , day . day , hour = 9 , minute = 31 ) elif frame_type == FrameType . MIN5 : floor_day = cls . day_frames [ cls . day_frames <= day ][ - 1 ] day = cls . int2date ( floor_day ) return datetime . datetime ( day . year , day . month , day . day , hour = 9 , minute = 35 ) elif frame_type == FrameType . MIN15 : floor_day = cls . day_frames [ cls . day_frames <= day ][ - 1 ] day = cls . int2date ( floor_day ) return datetime . datetime ( day . year , day . month , day . day , hour = 9 , minute = 45 ) elif frame_type == FrameType . MIN30 : floor_day = cls . day_frames [ cls . day_frames <= day ][ - 1 ] day = cls . int2date ( floor_day ) return datetime . datetime ( day . year , day . month , day . day , hour = 10 ) elif frame_type == FrameType . MIN60 : floor_day = cls . day_frames [ cls . day_frames <= day ][ - 1 ] day = cls . int2date ( floor_day ) return datetime . datetime ( day . year , day . month , day . day , hour = 10 , minute = 30 ) else : # pragma: no cover raise ValueError ( f \" { frame_type } not supported\" )","title":"first_min_frame()"},{"location":"api/timeframe/#omicron.models.timeframe.TimeFrame.floor","text":"\u6c42 moment \u5728\u6307\u5b9a\u7684 frame_type \u4e2d\u7684\u4e0b\u754c \u6bd4\u5982\uff0c\u5982\u679c moment \u4e3a10:37\uff0c\u5219\u5f53 frame_type \u4e3a30\u5206\u949f\u65f6\uff0c\u5bf9\u5e94\u7684\u4e0a\u754c\u4e3a10:00 Examples: >>> # \u5982\u679cmoment\u4e3a\u65e5\u671f\uff0c\u5219\u5f53\u6210\u5df2\u6536\u76d8\u5904\u7406 >>> TimeFrame . day_frames = np . array ([ 20050104 , 20050105 , 20050106 , 20050107 , 20050110 , 20050111 ]) >>> TimeFrame . floor ( datetime . date ( 2005 , 1 , 7 ), FrameType . DAY ) datetime . date ( 2005 , 1 , 7 ) >>> # moment\u6307\u5b9a\u7684\u65f6\u95f4\u8fd8\u672a\u6536\u76d8\uff0cfloor\u5230\u4e0a\u4e00\u4e2a\u4ea4\u6613\u65e5 >>> TimeFrame . floor ( datetime . datetime ( 2005 , 1 , 7 , 14 , 59 ), FrameType . DAY ) datetime . date ( 2005 , 1 , 6 ) >>> TimeFrame . floor ( datetime . date ( 2005 , 1 , 13 ), FrameType . WEEK ) datetime . date ( 2005 , 1 , 7 ) >>> TimeFrame . floor ( datetime . date ( 2005 , 2 , 27 ), FrameType . MONTH ) datetime . date ( 2005 , 1 , 31 ) >>> TimeFrame . floor ( datetime . datetime ( 2005 , 1 , 5 , 14 , 59 ), FrameType . MIN30 ) datetime . datetime ( 2005 , 1 , 5 , 14 , 30 ) >>> TimeFrame . floor ( datetime . datetime ( 2005 , 1 , 5 , 14 , 59 ), FrameType . MIN1 ) datetime . datetime ( 2005 , 1 , 5 , 14 , 59 ) >>> TimeFrame . floor ( arrow . get ( '2005-1-5 14:59' ) . naive , FrameType . MIN1 ) datetime . 
datetime ( 2005 , 1 , 5 , 14 , 59 ) Parameters: Name Type Description Default moment Frame required frame_type FrameType required Returns: Type Description Frame moment \u5728\u6307\u5b9a\u7684 frame_type \u4e2d\u7684\u4e0b\u754c Source code in omicron/models/timeframe.py @classmethod def floor ( cls , moment : Frame , frame_type : FrameType ) -> Frame : \"\"\"\u6c42`moment`\u5728\u6307\u5b9a\u7684`frame_type`\u4e2d\u7684\u4e0b\u754c \u6bd4\u5982\uff0c\u5982\u679c`moment`\u4e3a10:37\uff0c\u5219\u5f53`frame_type`\u4e3a30\u5206\u949f\u65f6\uff0c\u5bf9\u5e94\u7684\u4e0a\u754c\u4e3a10:00 Examples: >>> # \u5982\u679cmoment\u4e3a\u65e5\u671f\uff0c\u5219\u5f53\u6210\u5df2\u6536\u76d8\u5904\u7406 >>> TimeFrame.day_frames = np.array([20050104, 20050105, 20050106, 20050107, 20050110, 20050111]) >>> TimeFrame.floor(datetime.date(2005, 1, 7), FrameType.DAY) datetime.date(2005, 1, 7) >>> # moment\u6307\u5b9a\u7684\u65f6\u95f4\u8fd8\u672a\u6536\u76d8\uff0cfloor\u5230\u4e0a\u4e00\u4e2a\u4ea4\u6613\u65e5 >>> TimeFrame.floor(datetime.datetime(2005, 1, 7, 14, 59), FrameType.DAY) datetime.date(2005, 1, 6) >>> TimeFrame.floor(datetime.date(2005, 1, 13), FrameType.WEEK) datetime.date(2005, 1, 7) >>> TimeFrame.floor(datetime.date(2005,2, 27), FrameType.MONTH) datetime.date(2005, 1, 31) >>> TimeFrame.floor(datetime.datetime(2005,1,5,14,59), FrameType.MIN30) datetime.datetime(2005, 1, 5, 14, 30) >>> TimeFrame.floor(datetime.datetime(2005, 1, 5, 14, 59), FrameType.MIN1) datetime.datetime(2005, 1, 5, 14, 59) >>> TimeFrame.floor(arrow.get('2005-1-5 14:59').naive, FrameType.MIN1) datetime.datetime(2005, 1, 5, 14, 59) Args: moment: frame_type: Returns: `moment`\u5728\u6307\u5b9a\u7684`frame_type`\u4e2d\u7684\u4e0b\u754c \"\"\" if frame_type in cls . minute_level_frames : tm , day_offset = cls . minute_frames_floor ( cls . ticks [ frame_type ], moment . hour * 60 + moment . minute ) h , m = tm // 60 , tm % 60 if cls . day_shift ( moment , 0 ) < moment . date () or day_offset == - 1 : h = 15 m = 0 new_day = cls . day_shift ( moment , day_offset ) else : new_day = moment . date () return datetime . datetime ( new_day . year , new_day . month , new_day . day , h , m ) if type ( moment ) == datetime . date : moment = datetime . datetime ( moment . year , moment . month , moment . day , 15 ) # \u5982\u679c\u662f\u4ea4\u6613\u65e5\uff0c\u4f46\u8fd8\u672a\u6536\u76d8 if ( cls . date2int ( moment ) in cls . day_frames and moment . hour * 60 + moment . minute < 900 ): moment = cls . day_shift ( moment , - 1 ) day = cls . date2int ( moment ) if frame_type == FrameType . DAY : arr = cls . day_frames elif frame_type == FrameType . WEEK : arr = cls . week_frames elif frame_type == FrameType . MONTH : arr = cls . month_frames else : # pragma: no cover raise ValueError ( f \"frame type { frame_type } not supported.\" ) floored = ext . floor ( arr , day ) return cls . int2date ( floored )","title":"floor()"},{"location":"api/timeframe/#omicron.models.timeframe.TimeFrame.frame_len","text":"\u8fd4\u56de\u4ee5\u5206\u949f\u4e3a\u5355\u4f4d\u7684frame\u957f\u5ea6\u3002 \u5bf9\u65e5\u7ebf\u4ee5\u4e0a\u7ea7\u522b\u6ca1\u6709\u610f\u4e49\uff0c\u4f46\u4f1a\u8fd4\u56de240 Examples: >>> TimeFrame . frame_len ( FrameType . 
MIN5 ) 5 Parameters: Name Type Description Default frame_type FrameType required Returns: Type Description int \u8fd4\u56de\u4ee5\u5206\u949f\u4e3a\u5355\u4f4d\u7684frame\u957f\u5ea6\u3002 Source code in omicron/models/timeframe.py @classmethod def frame_len ( cls , frame_type : FrameType ) -> int : \"\"\"\u8fd4\u56de\u4ee5\u5206\u949f\u4e3a\u5355\u4f4d\u7684frame\u957f\u5ea6\u3002 \u5bf9\u65e5\u7ebf\u4ee5\u4e0a\u7ea7\u522b\u6ca1\u6709\u610f\u4e49\uff0c\u4f46\u4f1a\u8fd4\u56de240 Examples: >>> TimeFrame.frame_len(FrameType.MIN5) 5 Args: frame_type: Returns: \u8fd4\u56de\u4ee5\u5206\u949f\u4e3a\u5355\u4f4d\u7684frame\u957f\u5ea6\u3002 \"\"\" if frame_type == FrameType . MIN1 : return 1 elif frame_type == FrameType . MIN5 : return 5 elif frame_type == FrameType . MIN15 : return 15 elif frame_type == FrameType . MIN30 : return 30 elif frame_type == FrameType . MIN60 : return 60 else : return 240","title":"frame_len()"},{"location":"api/timeframe/#omicron.models.timeframe.TimeFrame.get_frame_scope","text":"\u5bf9\u4e8e\u7ed9\u5b9a\u7684\u65f6\u95f4\uff0c\u53d6\u6240\u5728\u5468\u7684\u7b2c\u4e00\u5929\u548c\u6700\u540e\u4e00\u5929\uff0c\u6240\u5728\u6708\u7684\u7b2c\u4e00\u5929\u548c\u6700\u540e\u4e00\u5929 Parameters: Name Type Description Default frame \u6307\u5b9a\u7684\u65e5\u671f\uff0cdate\u5bf9\u8c61 required ft FrameType \u5e27\u7c7b\u578b\uff0c\u652f\u6301WEEK\u548cMONTH required Returns: Type Description Tuple[Frame, Frame] \u5468\u6216\u8005\u6708\u7684\u9996\u672b\u65e5\u671f\uff08date\u5bf9\u8c61\uff09 Source code in omicron/models/timeframe.py @classmethod def get_frame_scope ( cls , frame : Frame , ft : FrameType ) -> Tuple [ Frame , Frame ]: # todo: \u51fd\u6570\u7684\u901a\u7528\u6027\u4e0d\u8db3\uff0c\u4f3c\u4e4e\u5e94\u8be5\u653e\u5728\u5177\u4f53\u7684\u4e1a\u52a1\u7c7b\u4e2d\u3002\u5982\u679c\u662f\u901a\u7528\u578b\u7684\u51fd\u6570\uff0c\u53c2\u6570\u4e0d\u5e94\u8be5\u5c40\u9650\u4e8e\u5468\u548c\u6708\u3002 \"\"\"\u5bf9\u4e8e\u7ed9\u5b9a\u7684\u65f6\u95f4\uff0c\u53d6\u6240\u5728\u5468\u7684\u7b2c\u4e00\u5929\u548c\u6700\u540e\u4e00\u5929\uff0c\u6240\u5728\u6708\u7684\u7b2c\u4e00\u5929\u548c\u6700\u540e\u4e00\u5929 Args: frame : \u6307\u5b9a\u7684\u65e5\u671f\uff0cdate\u5bf9\u8c61 ft: \u5e27\u7c7b\u578b\uff0c\u652f\u6301WEEK\u548cMONTH Returns: Tuple[Frame, Frame]: \u5468\u6216\u8005\u6708\u7684\u9996\u672b\u65e5\u671f\uff08date\u5bf9\u8c61\uff09 \"\"\" if frame is None : raise ValueError ( \"frame cannot be None\" ) if ft not in ( FrameType . WEEK , FrameType . MONTH ): raise ValueError ( f \"FrameType only supports WEEK and MONTH: { ft } \" ) if isinstance ( frame , datetime . datetime ): frame = frame . date () if frame < CALENDAR_START : raise ValueError ( f \"cannot be earlier than { CALENDAR_START } : { frame } \" ) # datetime.date(2021, 10, 8)\uff0c\u8fd9\u662f\u4e2a\u7279\u6b8a\u7684\u65e5\u671f if ft == FrameType . WEEK : if frame < datetime . date ( 2005 , 1 , 10 ): return datetime . date ( 2005 , 1 , 4 ), datetime . date ( 2005 , 1 , 7 ) if not cls . is_trade_day ( frame ): # \u975e\u4ea4\u6613\u65e5\u7684\u60c5\u51b5\uff0c\u76f4\u63a5\u56de\u9000\u4e00\u5929 week_day = cls . day_shift ( frame , 0 ) else : week_day = frame w1 = TimeFrame . floor ( week_day , FrameType . WEEK ) if w1 == week_day : # \u672c\u5468\u7684\u6700\u540e\u4e00\u4e2a\u4ea4\u6613\u65e5 week_end = w1 else : week_end = TimeFrame . week_shift ( week_day , 1 ) w0 = TimeFrame . week_shift ( week_end , - 1 ) week_start = TimeFrame . 
day_shift ( w0 , 1 ) return week_start , week_end if ft == FrameType . MONTH : if frame <= datetime . date ( 2005 , 1 , 31 ): return datetime . date ( 2005 , 1 , 4 ), datetime . date ( 2005 , 1 , 31 ) month_start = frame . replace ( day = 1 ) if not cls . is_trade_day ( month_start ): # \u975e\u4ea4\u6613\u65e5\u7684\u60c5\u51b5\uff0c\u76f4\u63a5\u52a01 month_start = cls . day_shift ( month_start , 1 ) month_end = TimeFrame . month_shift ( month_start , 1 ) return month_start , month_end","title":"get_frame_scope()"},{"location":"api/timeframe/#omicron.models.timeframe.TimeFrame.get_frames","text":"\u53d6[start, end]\u95f4\u6240\u6709\u7c7b\u578b\u4e3aframe_type\u7684frames \u8c03\u7528\u672c\u51fd\u6570\u524d\uff0c\u8bf7\u5148\u901a\u8fc7 floor \u6216\u8005 ceiling \u5c06\u65f6\u95f4\u5e27\u5bf9\u9f50\u5230 frame_type \u7684\u8fb9\u754c\u503c Examples: >>> start = arrow . get ( '2020-1-13 10:00' ) . naive >>> end = arrow . get ( '2020-1-13 13:30' ) . naive >>> TimeFrame . day_frames = np . array ([ 20200109 , 20200110 , 20200113 , 20200114 , 20200115 , 20200116 ]) >>> TimeFrame . get_frames ( start , end , FrameType . MIN30 ) [ 202001131000 , 202001131030 , 202001131100 , 202001131130 , 202001131330 ] Parameters: Name Type Description Default start Frame required end Frame required frame_type FrameType required Returns: Type Description List[int] frame list Source code in omicron/models/timeframe.py @classmethod def get_frames ( cls , start : Frame , end : Frame , frame_type : FrameType ) -> List [ int ]: \"\"\"\u53d6[start, end]\u95f4\u6240\u6709\u7c7b\u578b\u4e3aframe_type\u7684frames \u8c03\u7528\u672c\u51fd\u6570\u524d\uff0c\u8bf7\u5148\u901a\u8fc7`floor`\u6216\u8005`ceiling`\u5c06\u65f6\u95f4\u5e27\u5bf9\u9f50\u5230`frame_type`\u7684\u8fb9\u754c\u503c Example: >>> start = arrow.get('2020-1-13 10:00').naive >>> end = arrow.get('2020-1-13 13:30').naive >>> TimeFrame.day_frames = np.array([20200109, 20200110, 20200113,20200114, 20200115, 20200116]) >>> TimeFrame.get_frames(start, end, FrameType.MIN30) [202001131000, 202001131030, 202001131100, 202001131130, 202001131330] Args: start: end: frame_type: Returns: frame list \"\"\" n = cls . count_frames ( start , end , frame_type ) return cls . get_frames_by_count ( end , n , frame_type )","title":"get_frames()"},{"location":"api/timeframe/#omicron.models.timeframe.TimeFrame.get_frames_by_count","text":"\u53d6\u4ee5end\u4e3a\u7ed3\u675f\u70b9,\u5468\u671f\u4e3aframe_type\u7684n\u4e2aframe \u8c03\u7528\u524d\u8bf7\u5c06 end \u5bf9\u9f50\u5230 frame_type \u7684\u8fb9\u754c Examples: >>> end = arrow . get ( '2020-1-6 14:30' ) . naive >>> TimeFrame . day_frames = np . array ([ 20200102 , 20200103 , 20200106 , 20200107 , 20200108 , 20200109 ]) >>> TimeFrame . get_frames_by_count ( end , 2 , FrameType . 
MIN30 ) [ 202001061400 , 202001061430 ] Parameters: Name Type Description Default end Arrow required n int required frame_type FrameType required Returns: Type Description List[int] frame list Source code in omicron/models/timeframe.py @classmethod def get_frames_by_count ( cls , end : Arrow , n : int , frame_type : FrameType ) -> List [ int ]: \"\"\"\u53d6\u4ee5end\u4e3a\u7ed3\u675f\u70b9,\u5468\u671f\u4e3aframe_type\u7684n\u4e2aframe \u8c03\u7528\u524d\u8bf7\u5c06`end`\u5bf9\u9f50\u5230`frame_type`\u7684\u8fb9\u754c Examples: >>> end = arrow.get('2020-1-6 14:30').naive >>> TimeFrame.day_frames = np.array([20200102, 20200103,20200106, 20200107, 20200108, 20200109]) >>> TimeFrame.get_frames_by_count(end, 2, FrameType.MIN30) [202001061400, 202001061430] Args: end: n: frame_type: Returns: frame list \"\"\" if frame_type == FrameType . DAY : end = cls . date2int ( end ) pos = np . searchsorted ( cls . day_frames , end , side = \"right\" ) return cls . day_frames [ max ( 0 , pos - n ) : pos ] . tolist () elif frame_type == FrameType . WEEK : end = cls . date2int ( end ) pos = np . searchsorted ( cls . week_frames , end , side = \"right\" ) return cls . week_frames [ max ( 0 , pos - n ) : pos ] . tolist () elif frame_type == FrameType . MONTH : end = cls . date2int ( end ) pos = np . searchsorted ( cls . month_frames , end , side = \"right\" ) return cls . month_frames [ max ( 0 , pos - n ) : pos ] . tolist () elif frame_type in { FrameType . MIN1 , FrameType . MIN5 , FrameType . MIN15 , FrameType . MIN30 , FrameType . MIN60 , }: n_days = n // len ( cls . ticks [ frame_type ]) + 2 ticks = cls . ticks [ frame_type ] * n_days days = cls . get_frames_by_count ( end , n_days , FrameType . DAY ) days = np . repeat ( days , len ( cls . ticks [ frame_type ])) ticks = [ day . item () * 10000 + int ( tm / 60 ) * 100 + tm % 60 for day , tm in zip ( days , ticks ) ] # list index is much faster than ext.index_sorted when the arr is small pos = ticks . index ( cls . time2int ( end )) + 1 return ticks [ max ( 0 , pos - n ) : pos ] else : # pragma: no cover raise ValueError ( f \" { frame_type } not support yet\" )","title":"get_frames_by_count()"},{"location":"api/timeframe/#omicron.models.timeframe.TimeFrame.get_previous_trade_day","text":"\u83b7\u53d6\u4e0a\u4e00\u4e2a\u4ea4\u6613\u65e5 \u5982\u679c\u5f53\u5929\u662f\u5468\u516d\u6216\u8005\u5468\u65e5\uff0c\u8fd4\u56de\u5468\u4e94\uff08\u4ea4\u6613\u65e5\uff09\uff0c\u5982\u679c\u5f53\u5929\u662f\u5468\u4e00\uff0c\u8fd4\u56de\u5468\u4e94\uff0c\u5982\u679c\u5f53\u5929\u662f\u5468\u4e94\uff0c\u8fd4\u56de\u5468\u56db Parameters: Name Type Description Default now \u6307\u5b9a\u7684\u65e5\u671f\uff0cdate\u5bf9\u8c61 required Returns: Type Description datetime.date \u4e0a\u4e00\u4e2a\u4ea4\u6613\u65e5 Source code in omicron/models/timeframe.py @classmethod def get_previous_trade_day ( cls , now : datetime . date ): \"\"\"\u83b7\u53d6\u4e0a\u4e00\u4e2a\u4ea4\u6613\u65e5 \u5982\u679c\u5f53\u5929\u662f\u5468\u516d\u6216\u8005\u5468\u65e5\uff0c\u8fd4\u56de\u5468\u4e94\uff08\u4ea4\u6613\u65e5\uff09\uff0c\u5982\u679c\u5f53\u5929\u662f\u5468\u4e00\uff0c\u8fd4\u56de\u5468\u4e94\uff0c\u5982\u679c\u5f53\u5929\u662f\u5468\u4e94\uff0c\u8fd4\u56de\u5468\u56db Args: now : \u6307\u5b9a\u7684\u65e5\u671f\uff0cdate\u5bf9\u8c61 Returns: datetime.date: \u4e0a\u4e00\u4e2a\u4ea4\u6613\u65e5 \"\"\" if now == datetime . date ( 2005 , 1 , 4 ): return now if TimeFrame . is_trade_day ( now ): pre_trade_day = TimeFrame . day_shift ( now , - 1 ) else : pre_trade_day = TimeFrame . 
day_shift ( now , 0 ) return pre_trade_day","title":"get_previous_trade_day()"},{"location":"api/timeframe/#omicron.models.timeframe.TimeFrame.get_ticks","text":"\u53d6\u6708\u7ebf\u3001\u5468\u7ebf\u3001\u65e5\u7ebf\u53ca\u5404\u5206\u949f\u7ebf\u5bf9\u5e94\u7684frame \u5bf9\u5206\u949f\u7ebf\uff0c\u8fd4\u56de\u503c\u4ec5\u5305\u542b\u65f6\u95f4\uff0c\u4e0d\u5305\u542b\u65e5\u671f\uff08\u5747\u4e3a\u6574\u6570\u8868\u793a\uff09 Examples: >>> TimeFrame . month_frames = np . array ([ 20050131 , 20050228 , 20050331 ]) >>> TimeFrame . get_ticks ( FrameType . MONTH )[: 3 ] array ([ 20050131 , 20050228 , 20050331 ]) Parameters: Name Type Description Default frame_type [description] required Exceptions: Type Description ValueError [description] Returns: Type Description Union[List, np.array] \u6708\u7ebf\u3001\u5468\u7ebf\u3001\u65e5\u7ebf\u53ca\u5404\u5206\u949f\u7ebf\u5bf9\u5e94\u7684frame Source code in omicron/models/timeframe.py @classmethod def get_ticks ( cls , frame_type : FrameType ) -> Union [ List , np . array ]: \"\"\"\u53d6\u6708\u7ebf\u3001\u5468\u7ebf\u3001\u65e5\u7ebf\u53ca\u5404\u5206\u949f\u7ebf\u5bf9\u5e94\u7684frame \u5bf9\u5206\u949f\u7ebf\uff0c\u8fd4\u56de\u503c\u4ec5\u5305\u542b\u65f6\u95f4\uff0c\u4e0d\u5305\u542b\u65e5\u671f\uff08\u5747\u4e3a\u6574\u6570\u8868\u793a\uff09 Examples: >>> TimeFrame.month_frames = np.array([20050131, 20050228, 20050331]) >>> TimeFrame.get_ticks(FrameType.MONTH)[:3] array([20050131, 20050228, 20050331]) Args: frame_type : [description] Raises: ValueError: [description] Returns: \u6708\u7ebf\u3001\u5468\u7ebf\u3001\u65e5\u7ebf\u53ca\u5404\u5206\u949f\u7ebf\u5bf9\u5e94\u7684frame \"\"\" if frame_type in cls . minute_level_frames : return cls . ticks [ frame_type ] if frame_type == FrameType . DAY : return cls . day_frames elif frame_type == FrameType . WEEK : return cls . week_frames elif frame_type == FrameType . MONTH : return cls . month_frames else : # pragma: no cover raise ValueError ( f \" { frame_type } not supported!\" )","title":"get_ticks()"},{"location":"api/timeframe/#omicron.models.timeframe.TimeFrame.init","text":"\u521d\u59cb\u5316\u65e5\u5386 Source code in omicron/models/timeframe.py @classmethod async def init ( cls ): \"\"\"\u521d\u59cb\u5316\u65e5\u5386\"\"\" await cls . _load_calendar ()","title":"init()"},{"location":"api/timeframe/#omicron.models.timeframe.TimeFrame.int2date","text":"\u5c06\u6570\u5b57\u8868\u793a\u7684\u65e5\u671f\u8f6c\u6362\u6210\u4e3a\u65e5\u671f\u683c\u5f0f Examples: >>> TimeFrame . int2date ( 20200501 ) datetime . date ( 2020 , 5 , 1 ) Parameters: Name Type Description Default d Union[int, str] YYYYMMDD\u8868\u793a\u7684\u65e5\u671f required Returns: Type Description datetime.date \u8f6c\u6362\u540e\u7684\u65e5\u671f Source code in omicron/models/timeframe.py @classmethod def int2date ( cls , d : Union [ int , str ]) -> datetime . date : \"\"\"\u5c06\u6570\u5b57\u8868\u793a\u7684\u65e5\u671f\u8f6c\u6362\u6210\u4e3a\u65e5\u671f\u683c\u5f0f Examples: >>> TimeFrame.int2date(20200501) datetime.date(2020, 5, 1) Args: d: YYYYMMDD\u8868\u793a\u7684\u65e5\u671f Returns: \u8f6c\u6362\u540e\u7684\u65e5\u671f \"\"\" s = str ( d ) # it's 8 times faster than arrow.get return datetime . date ( int ( s [: 4 ]), int ( s [ 4 : 6 ]), int ( s [ 6 :]))","title":"int2date()"},{"location":"api/timeframe/#omicron.models.timeframe.TimeFrame.int2time","text":"\u5c06\u6574\u6570\u8868\u793a\u7684\u65f6\u95f4\u8f6c\u6362\u4e3a datetime \u7c7b\u578b\u8868\u793a Examples: >>> TimeFrame . int2time ( 202005011500 ) datetime . 
datetime ( 2020 , 5 , 1 , 15 , 0 ) Parameters: Name Type Description Default tm int time in YYYYMMDDHHmm format required Returns: Type Description datetime.datetime \u8f6c\u6362\u540e\u7684\u65f6\u95f4 Source code in omicron/models/timeframe.py @classmethod def int2time ( cls , tm : int ) -> datetime . datetime : \"\"\"\u5c06\u6574\u6570\u8868\u793a\u7684\u65f6\u95f4\u8f6c\u6362\u4e3a`datetime`\u7c7b\u578b\u8868\u793a examples: >>> TimeFrame.int2time(202005011500) datetime.datetime(2020, 5, 1, 15, 0) Args: tm: time in YYYYMMDDHHmm format Returns: \u8f6c\u6362\u540e\u7684\u65f6\u95f4 \"\"\" s = str ( tm ) # its 8 times faster than arrow.get() return datetime . datetime ( int ( s [: 4 ]), int ( s [ 4 : 6 ]), int ( s [ 6 : 8 ]), int ( s [ 8 : 10 ]), int ( s [ 10 : 12 ]) )","title":"int2time()"},{"location":"api/timeframe/#omicron.models.timeframe.TimeFrame.is_bar_closed","text":"\u5224\u65ad frame \u6240\u4ee3\u8868\u7684bar\u662f\u5426\u5df2\u7ecf\u6536\u76d8\uff08\u7ed3\u675f\uff09 \u5982\u679c\u662f\u65e5\u7ebf\uff0cframe\u4e0d\u4e3a\u5f53\u5929\uff0c\u5219\u8ba4\u4e3a\u5df2\u6536\u76d8\uff1b\u6216\u8005\u5f53\u524d\u65f6\u95f4\u5728\u6536\u76d8\u65f6\u95f4\u4e4b\u540e\uff0c\u4e5f\u8ba4\u4e3a\u5df2\u6536\u76d8\u3002 \u5982\u679c\u662f\u5176\u5b83\u5468\u671f\uff0c\u5219\u53ea\u6709\u5f53frame\u6b63\u597d\u5728\u8fb9\u754c\u4e0a\uff0c\u624d\u8ba4\u4e3a\u662f\u5df2\u6536\u76d8\u3002\u8fd9\u91cc\u6709\u4e00\u4e2a\u5047\u8bbe\uff1a\u6211\u4eec\u4e0d\u4f1a\u5728\u5176\u5b83\u5468\u671f\u4e0a\uff0c\u5224\u65ad\u672a\u6765\u7684\u67d0\u4e2aframe\u662f\u5426\u5df2\u7ecf\u6536\u76d8\u3002 Parameters: Name Type Description Default frame bar\u6240\u5904\u7684\u65f6\u95f4\uff0c\u5fc5\u987b\u5c0f\u4e8e\u5f53\u524d\u65f6\u95f4 required ft FrameType bar\u6240\u4ee3\u8868\u7684\u5e27\u7c7b\u578b required Returns: Type Description bool \u662f\u5426\u5df2\u7ecf\u6536\u76d8 Source code in omicron/models/timeframe.py @classmethod def is_bar_closed ( cls , frame : Frame , ft : FrameType ) -> bool : \"\"\"\u5224\u65ad`frame`\u6240\u4ee3\u8868\u7684bar\u662f\u5426\u5df2\u7ecf\u6536\u76d8\uff08\u7ed3\u675f\uff09 \u5982\u679c\u662f\u65e5\u7ebf\uff0cframe\u4e0d\u4e3a\u5f53\u5929\uff0c\u5219\u8ba4\u4e3a\u5df2\u6536\u76d8\uff1b\u6216\u8005\u5f53\u524d\u65f6\u95f4\u5728\u6536\u76d8\u65f6\u95f4\u4e4b\u540e\uff0c\u4e5f\u8ba4\u4e3a\u5df2\u6536\u76d8\u3002 \u5982\u679c\u662f\u5176\u5b83\u5468\u671f\uff0c\u5219\u53ea\u6709\u5f53frame\u6b63\u597d\u5728\u8fb9\u754c\u4e0a\uff0c\u624d\u8ba4\u4e3a\u662f\u5df2\u6536\u76d8\u3002\u8fd9\u91cc\u6709\u4e00\u4e2a\u5047\u8bbe\uff1a\u6211\u4eec\u4e0d\u4f1a\u5728\u5176\u5b83\u5468\u671f\u4e0a\uff0c\u5224\u65ad\u672a\u6765\u7684\u67d0\u4e2aframe\u662f\u5426\u5df2\u7ecf\u6536\u76d8\u3002 Args: frame : bar\u6240\u5904\u7684\u65f6\u95f4\uff0c\u5fc5\u987b\u5c0f\u4e8e\u5f53\u524d\u65f6\u95f4 ft: bar\u6240\u4ee3\u8868\u7684\u5e27\u7c7b\u578b Returns: bool: \u662f\u5426\u5df2\u7ecf\u6536\u76d8 \"\"\" floor = cls . floor ( frame , ft ) now = arrow . now () if ft == FrameType . DAY : return floor < now . date () or now . 
hour >= 15 else : return floor == frame","title":"is_bar_closed()"},{"location":"api/timeframe/#omicron.models.timeframe.TimeFrame.is_closing_call_auction_time","text":"\u5224\u65ad tm \u6307\u5b9a\u7684\u65f6\u95f4\u662f\u5426\u4e3a\u6536\u76d8\u96c6\u5408\u7ade\u4ef7\u65f6\u95f4 Fixme \u6b64\u5904\u5b9e\u73b0\u6709\u8bef\uff0c\u6536\u76d8\u96c6\u5408\u7ade\u4ef7\u65f6\u95f4\u5e94\u8be5\u8fd8\u5305\u542b\u4e0a\u5348\u6536\u76d8\u65f6\u95f4 Parameters: Name Type Description Default tm [description]. Defaults to None. None Returns: Type Description bool bool Source code in omicron/models/timeframe.py @classmethod def is_closing_call_auction_time ( cls , tm : Union [ datetime . datetime , Arrow ] = None ) -> bool : \"\"\"\u5224\u65ad`tm`\u6307\u5b9a\u7684\u65f6\u95f4\u662f\u5426\u4e3a\u6536\u76d8\u96c6\u5408\u7ade\u4ef7\u65f6\u95f4 Fixme: \u6b64\u5904\u5b9e\u73b0\u6709\u8bef\uff0c\u6536\u76d8\u96c6\u5408\u7ade\u4ef7\u65f6\u95f4\u5e94\u8be5\u8fd8\u5305\u542b\u4e0a\u5348\u6536\u76d8\u65f6\u95f4 Args: tm : [description]. Defaults to None. Returns: bool \"\"\" tm = tm or cls . now () if not cls . is_trade_day ( tm ): return False minutes = tm . hour * 60 + tm . minute return 15 * 60 - 3 <= minutes < 15 * 60","title":"is_closing_call_auction_time()"},{"location":"api/timeframe/#omicron.models.timeframe.TimeFrame.is_open_time","text":"\u5224\u65ad tm \u6307\u5b9a\u7684\u65f6\u95f4\u662f\u5426\u5904\u5728\u4ea4\u6613\u65f6\u95f4\u6bb5\u3002 \u4ea4\u6613\u65f6\u95f4\u6bb5\u662f\u6307\u96c6\u5408\u7ade\u4ef7\u65f6\u95f4\u6bb5\u4e4b\u5916\u7684\u5f00\u76d8\u65f6\u95f4 Examples: >>> TimeFrame . day_frames = np . array ([ 20200102 , 20200103 , 20200106 , 20200107 , 20200108 ]) >>> TimeFrame . is_open_time ( arrow . get ( '2020-1-1 14:59' ) . naive ) False >>> TimeFrame . is_open_time ( arrow . get ( '2020-1-3 14:59' ) . naive ) True Parameters: Name Type Description Default tm [description]. Defaults to None. None Returns: Type Description bool bool Source code in omicron/models/timeframe.py @classmethod def is_open_time ( cls , tm : Union [ datetime . datetime , Arrow ] = None ) -> bool : \"\"\"\u5224\u65ad`tm`\u6307\u5b9a\u7684\u65f6\u95f4\u662f\u5426\u5904\u5728\u4ea4\u6613\u65f6\u95f4\u6bb5\u3002 \u4ea4\u6613\u65f6\u95f4\u6bb5\u662f\u6307\u96c6\u5408\u7ade\u4ef7\u65f6\u95f4\u6bb5\u4e4b\u5916\u7684\u5f00\u76d8\u65f6\u95f4 Examples: >>> TimeFrame.day_frames = np.array([20200102, 20200103, 20200106, 20200107, 20200108]) >>> TimeFrame.is_open_time(arrow.get('2020-1-1 14:59').naive) False >>> TimeFrame.is_open_time(arrow.get('2020-1-3 14:59').naive) True Args: tm : [description]. Defaults to None. Returns: bool \"\"\" tm = tm or arrow . now () if not cls . is_trade_day ( tm ): return False tick = tm . hour * 60 + tm . minute return tick in cls . ticks [ FrameType . MIN1 ]","title":"is_open_time()"},{"location":"api/timeframe/#omicron.models.timeframe.TimeFrame.is_opening_call_auction_time","text":"\u5224\u65ad tm \u6307\u5b9a\u7684\u65f6\u95f4\u662f\u5426\u4e3a\u5f00\u76d8\u96c6\u5408\u7ade\u4ef7\u65f6\u95f4 Parameters: Name Type Description Default tm [description]. Defaults to None. None Returns: Type Description bool bool Source code in omicron/models/timeframe.py @classmethod def is_opening_call_auction_time ( cls , tm : Union [ Arrow , datetime . datetime ] = None ) -> bool : \"\"\"\u5224\u65ad`tm`\u6307\u5b9a\u7684\u65f6\u95f4\u662f\u5426\u4e3a\u5f00\u76d8\u96c6\u5408\u7ade\u4ef7\u65f6\u95f4 Args: tm : [description]. Defaults to None. Returns: bool \"\"\" if tm is None : tm = cls . 
now () if not cls . is_trade_day ( tm ): return False minutes = tm . hour * 60 + tm . minute return 9 * 60 + 15 < minutes <= 9 * 60 + 25","title":"is_opening_call_auction_time()"},{"location":"api/timeframe/#omicron.models.timeframe.TimeFrame.is_trade_day","text":"\u5224\u65ad dt \u662f\u5426\u4e3a\u4ea4\u6613\u65e5 Examples: >>> TimeFrame . is_trade_day ( arrow . get ( '2020-1-1' )) False Parameters: Name Type Description Default dt required Returns: Type Description bool bool Source code in omicron/models/timeframe.py @classmethod def is_trade_day ( cls , dt : Union [ datetime . date , datetime . datetime , Arrow ]) -> bool : \"\"\"\u5224\u65ad`dt`\u662f\u5426\u4e3a\u4ea4\u6613\u65e5 Examples: >>> TimeFrame.is_trade_day(arrow.get('2020-1-1')) False Args: dt : Returns: bool \"\"\" return cls . date2int ( dt ) in cls . day_frames","title":"is_trade_day()"},{"location":"api/timeframe/#omicron.models.timeframe.TimeFrame.last_min_frame","text":"\u83b7\u53d6 day \u65e5\u5468\u671f\u4e3a frame_type \u7684\u7ed3\u675fframe\u3002 Examples: >>> TimeFrame . last_min_frame ( arrow . get ( '2020-1-5' ) . date (), FrameType . MIN30 ) datetime . datetime ( 2020 , 1 , 3 , 15 , 0 ) Parameters: Name Type Description Default day Union[str, Arrow, datetime.date] required frame_type FrameType required Returns: Type Description Union[datetime.date, datetime.datetime] day \u65e5\u5468\u671f\u4e3a frame_type \u7684\u7ed3\u675fframe Source code in omicron/models/timeframe.py @classmethod def last_min_frame ( cls , day : Union [ str , Arrow , datetime . date ], frame_type : FrameType ) -> Union [ datetime . date , datetime . datetime ]: \"\"\"\u83b7\u53d6`day`\u65e5\u5468\u671f\u4e3a`frame_type`\u7684\u7ed3\u675fframe\u3002 Example: >>> TimeFrame.last_min_frame(arrow.get('2020-1-5').date(), FrameType.MIN30) datetime.datetime(2020, 1, 3, 15, 0) Args: day: frame_type: Returns: `day`\u65e5\u5468\u671f\u4e3a`frame_type`\u7684\u7ed3\u675fframe \"\"\" if isinstance ( day , str ): day = cls . date2int ( arrow . get ( day ) . date ()) elif isinstance ( day , arrow . Arrow ) or isinstance ( day , datetime . datetime ): day = cls . date2int ( day . date ()) elif isinstance ( day , datetime . date ): day = cls . date2int ( day ) else : raise TypeError ( f \" { type ( day ) } is not supported.\" ) if frame_type in cls . minute_level_frames : last_close_day = cls . day_frames [ cls . day_frames <= day ][ - 1 ] day = cls . int2date ( last_close_day ) return datetime . datetime ( day . year , day . month , day . day , hour = 15 , minute = 0 ) else : # pragma: no cover raise ValueError ( f \" { frame_type } not supported\" )","title":"last_min_frame()"},{"location":"api/timeframe/#omicron.models.timeframe.TimeFrame.minute_frames_floor","text":"\u5bf9\u4e8e\u5206\u949f\u7ea7\u7684frame,\u8fd4\u56de\u5b83\u4eec\u4e0eframe\u523b\u5ea6\u5411\u4e0b\u5bf9\u9f50\u540e\u7684frame\u53ca\u65e5\u671f\u8fdb\u4f4d\u3002\u5982\u679c\u9700\u8981\u5bf9\u9f50\u5230\u4e0a\u4e00\u4e2a\u4ea4\u6613 \u65e5\uff0c\u5219\u8fdb\u4f4d\u4e3a-1\uff0c\u5426\u5219\u4e3a0. Examples: >>> ticks = [ 600 , 630 , 660 , 690 , 810 , 840 , 870 , 900 ] >>> TimeFrame . minute_frames_floor ( ticks , 545 ) ( 900 , - 1 ) >>> TimeFrame . minute_frames_floor ( ticks , 600 ) ( 600 , 0 ) >>> TimeFrame . minute_frames_floor ( ticks , 605 ) ( 600 , 0 ) >>> TimeFrame . minute_frames_floor ( ticks , 899 ) ( 870 , 0 ) >>> TimeFrame . minute_frames_floor ( ticks , 900 ) ( 900 , 0 ) >>> TimeFrame . 
minute_frames_floor ( ticks , 905 ) ( 900 , 0 ) Parameters: Name Type Description Default ticks np.array or list frames\u523b\u5ea6 required moment int \u6574\u6570\u8868\u793a\u7684\u5206\u949f\u6570\uff0c\u6bd4\u5982900\u8868\u793a15\uff1a00 required Returns: Type Description Tuple[int, int] tuple, the first is the new moment, the second is carry-on Source code in omicron/models/timeframe.py @classmethod def minute_frames_floor ( cls , ticks , moment ) -> Tuple [ int , int ]: \"\"\" \u5bf9\u4e8e\u5206\u949f\u7ea7\u7684frame,\u8fd4\u56de\u5b83\u4eec\u4e0eframe\u523b\u5ea6\u5411\u4e0b\u5bf9\u9f50\u540e\u7684frame\u53ca\u65e5\u671f\u8fdb\u4f4d\u3002\u5982\u679c\u9700\u8981\u5bf9\u9f50\u5230\u4e0a\u4e00\u4e2a\u4ea4\u6613 \u65e5\uff0c\u5219\u8fdb\u4f4d\u4e3a-1\uff0c\u5426\u5219\u4e3a0. Examples: >>> ticks = [600, 630, 660, 690, 810, 840, 870, 900] >>> TimeFrame.minute_frames_floor(ticks, 545) (900, -1) >>> TimeFrame.minute_frames_floor(ticks, 600) (600, 0) >>> TimeFrame.minute_frames_floor(ticks, 605) (600, 0) >>> TimeFrame.minute_frames_floor(ticks, 899) (870, 0) >>> TimeFrame.minute_frames_floor(ticks, 900) (900, 0) >>> TimeFrame.minute_frames_floor(ticks, 905) (900, 0) Args: ticks (np.array or list): frames\u523b\u5ea6 moment (int): \u6574\u6570\u8868\u793a\u7684\u5206\u949f\u6570\uff0c\u6bd4\u5982900\u8868\u793a15\uff1a00 Returns: tuple, the first is the new moment, the second is carry-on \"\"\" if moment < ticks [ 0 ]: return ticks [ - 1 ], - 1 # \u2019right' \u76f8\u5f53\u4e8e ticks <= m index = np . searchsorted ( ticks , moment , side = \"right\" ) return ticks [ index - 1 ], 0","title":"minute_frames_floor()"},{"location":"api/timeframe/#omicron.models.timeframe.TimeFrame.month_shift","text":"\u6c42 start \u6240\u5728\u7684\u6708\u79fb\u4f4d\u540e\u7684frame \u672c\u51fd\u6570\u9996\u5148\u5c06 start \u5bf9\u9f50\uff0c\u7136\u540e\u8fdb\u884c\u79fb\u4f4d\u3002 Examples: >>> TimeFrame . month_frames = np . array ([ 20150130 , 20150227 , 20150331 , 20150430 ]) >>> TimeFrame . month_shift ( arrow . get ( '2015-2-26' ) . date (), 0 ) datetime . date ( 2015 , 1 , 30 ) >>> TimeFrame . month_shift ( arrow . get ( '2015-2-27' ) . date (), 0 ) datetime . date ( 2015 , 2 , 27 ) >>> TimeFrame . month_shift ( arrow . get ( '2015-3-1' ) . date (), 0 ) datetime . date ( 2015 , 2 , 27 ) >>> TimeFrame . month_shift ( arrow . get ( '2015-3-1' ) . date (), 1 ) datetime . date ( 2015 , 3 , 31 ) Returns: Type Description datetime.date \u79fb\u4f4d\u540e\u7684\u65e5\u671f Source code in omicron/models/timeframe.py @classmethod def month_shift ( cls , start : datetime . date , offset : int ) -> datetime . date : \"\"\"\u6c42`start`\u6240\u5728\u7684\u6708\u79fb\u4f4d\u540e\u7684frame \u672c\u51fd\u6570\u9996\u5148\u5c06`start`\u5bf9\u9f50\uff0c\u7136\u540e\u8fdb\u884c\u79fb\u4f4d\u3002 Examples: >>> TimeFrame.month_frames = np.array([20150130, 20150227, 20150331, 20150430]) >>> TimeFrame.month_shift(arrow.get('2015-2-26').date(), 0) datetime.date(2015, 1, 30) >>> TimeFrame.month_shift(arrow.get('2015-2-27').date(), 0) datetime.date(2015, 2, 27) >>> TimeFrame.month_shift(arrow.get('2015-3-1').date(), 0) datetime.date(2015, 2, 27) >>> TimeFrame.month_shift(arrow.get('2015-3-1').date(), 1) datetime.date(2015, 3, 31) Returns: \u79fb\u4f4d\u540e\u7684\u65e5\u671f \"\"\" start = cls . date2int ( start ) return cls . int2date ( ext . shift ( cls . 
month_frames , start , offset ))","title":"month_shift()"},{"location":"api/timeframe/#omicron.models.timeframe.TimeFrame.replace_date","text":"\u5c06 dtm \u53d8\u91cf\u7684\u65e5\u671f\u66f4\u6362\u4e3a dt \u6307\u5b9a\u7684\u65e5\u671f Examples: >>> TimeFrame . replace_date ( arrow . get ( '2020-1-1 13:49' ) . datetime , datetime . date ( 2019 , 1 , 1 )) datetime . datetime ( 2019 , 1 , 1 , 13 , 49 ) Parameters: Name Type Description Default dtm datetime.datetime [description] required dt datetime.date [description] required Returns: Type Description datetime.datetime \u53d8\u6362\u540e\u7684\u65f6\u95f4 Source code in omicron/models/timeframe.py @classmethod def replace_date ( cls , dtm : datetime . datetime , dt : datetime . date ) -> datetime . datetime : \"\"\"\u5c06`dtm`\u53d8\u91cf\u7684\u65e5\u671f\u66f4\u6362\u4e3a`dt`\u6307\u5b9a\u7684\u65e5\u671f Example: >>> TimeFrame.replace_date(arrow.get('2020-1-1 13:49').datetime, datetime.date(2019, 1,1)) datetime.datetime(2019, 1, 1, 13, 49) Args: dtm (datetime.datetime): [description] dt (datetime.date): [description] Returns: \u53d8\u6362\u540e\u7684\u65f6\u95f4 \"\"\" return datetime . datetime ( dt . year , dt . month , dt . day , dtm . hour , dtm . minute , dtm . second , dtm . microsecond )","title":"replace_date()"},{"location":"api/timeframe/#omicron.models.timeframe.TimeFrame.resample_frames","text":"\u5c06\u4ece\u884c\u60c5\u670d\u52a1\u5668\u83b7\u53d6\u7684\u4ea4\u6613\u65e5\u5386\u91cd\u91c7\u6837\uff0c\u751f\u6210\u5468\u5e27\u548c\u6708\u7ebf\u5e27 Parameters: Name Type Description Default trade_days Iterable [description] required frame_type FrameType [description] required Returns: Type Description List[int] \u91cd\u91c7\u6837\u540e\u7684\u65e5\u671f\u5217\u8868\uff0c\u65e5\u671f\u7528\u6574\u6570\u8868\u793a Source code in omicron/models/timeframe.py @classmethod def resample_frames ( cls , trade_days : Iterable [ datetime . date ], frame_type : FrameType ) -> List [ int ]: \"\"\"\u5c06\u4ece\u884c\u60c5\u670d\u52a1\u5668\u83b7\u53d6\u7684\u4ea4\u6613\u65e5\u5386\u91cd\u91c7\u6837\uff0c\u751f\u6210\u5468\u5e27\u548c\u6708\u7ebf\u5e27 Args: trade_days (Iterable): [description] frame_type (FrameType): [description] Returns: List[int]: \u91cd\u91c7\u6837\u540e\u7684\u65e5\u671f\u5217\u8868\uff0c\u65e5\u671f\u7528\u6574\u6570\u8868\u793a \"\"\" if frame_type == FrameType . WEEK : weeks = [] last = trade_days [ 0 ] for cur in trade_days : if cur . weekday () < last . weekday () or ( cur - last ) . days >= 7 : weeks . append ( last ) last = cur if weeks [ - 1 ] < last : weeks . append ( last ) return weeks elif frame_type == FrameType . MONTH : months = [] last = trade_days [ 0 ] for cur in trade_days : if cur . day < last . day : months . append ( last ) last = cur months . append ( last ) return months elif frame_type == FrameType . QUARTER : quarters = [] last = trade_days [ 0 ] for cur in trade_days : if last . month % 3 == 0 : if cur . month > last . month or cur . year > last . year : quarters . append ( last ) last = cur quarters . append ( last ) return quarters elif frame_type == FrameType . YEAR : years = [] last = trade_days [ 0 ] for cur in trade_days : if cur . year > last . year : years . append ( last ) last = cur years . 
append ( last ) return years else : # pragma: no cover raise ValueError ( f \"Unsupported FrameType: { frame_type } \" )","title":"resample_frames()"},{"location":"api/timeframe/#omicron.models.timeframe.TimeFrame.service_degrade","text":"\u5f53cache\u4e2d\u4e0d\u5b58\u5728\u65e5\u5386\u65f6\uff0c\u542f\u7528\u968fomicron\u7248\u672c\u4e00\u8d77\u53d1\u884c\u65f6\u81ea\u5e26\u7684\u65e5\u5386\u3002 \u6ce8\u610f\uff1a\u968fomicron\u7248\u672c\u4e00\u8d77\u53d1\u884c\u65f6\u81ea\u5e26\u7684\u65e5\u5386\u5f88\u53ef\u80fd\u4e0d\u662f\u6700\u65b0\u7684\uff0c\u5e76\u4e14\u53ef\u80fd\u5305\u542b\u9519\u8bef\u3002\u6bd4\u5982\uff0c\u5b58\u5728\u8fd9\u6837\u7684\u60c5\u51b5\uff0c\u5728\u672c\u7248\u672c\u7684omicron\u53d1\u884c\u65f6\uff0c\u65e5\u5386\u66f4\u65b0\u5230\u4e862021\u5e7412\u670831\u65e5\uff0c\u5728\u8fd9\u4e4b\u524d\u7684\u65e5\u5386\u90fd\u662f\u51c6\u786e\u7684\uff0c\u4f46\u5728\u6b64\u4e4b\u540e\u7684\u65e5\u5386\uff0c\u5219\u6709\u53ef\u80fd\u51fa\u73b0\u9519\u8bef\u3002\u56e0\u6b64\uff0c\u53ea\u5e94\u8be5\u5728\u7279\u6b8a\u7684\u60c5\u51b5\u4e0b\uff08\u6bd4\u5982\u6d4b\u8bd5\uff09\u8c03\u7528\u6b64\u65b9\u6cd5\uff0c\u4ee5\u83b7\u5f97\u4e00\u4e2a\u964d\u7ea7\u7684\u670d\u52a1\u3002 Source code in omicron/models/timeframe.py @classmethod def service_degrade ( cls ): \"\"\"\u5f53cache\u4e2d\u4e0d\u5b58\u5728\u65e5\u5386\u65f6\uff0c\u542f\u7528\u968fomicron\u7248\u672c\u4e00\u8d77\u53d1\u884c\u65f6\u81ea\u5e26\u7684\u65e5\u5386\u3002 \u6ce8\u610f\uff1a\u968fomicron\u7248\u672c\u4e00\u8d77\u53d1\u884c\u65f6\u81ea\u5e26\u7684\u65e5\u5386\u5f88\u53ef\u80fd\u4e0d\u662f\u6700\u65b0\u7684\uff0c\u5e76\u4e14\u53ef\u80fd\u5305\u542b\u9519\u8bef\u3002\u6bd4\u5982\uff0c\u5b58\u5728\u8fd9\u6837\u7684\u60c5\u51b5\uff0c\u5728\u672c\u7248\u672c\u7684omicron\u53d1\u884c\u65f6\uff0c\u65e5\u5386\u66f4\u65b0\u5230\u4e862021\u5e7412\u670831\u65e5\uff0c\u5728\u8fd9\u4e4b\u524d\u7684\u65e5\u5386\u90fd\u662f\u51c6\u786e\u7684\uff0c\u4f46\u5728\u6b64\u4e4b\u540e\u7684\u65e5\u5386\uff0c\u5219\u6709\u53ef\u80fd\u51fa\u73b0\u9519\u8bef\u3002\u56e0\u6b64\uff0c\u53ea\u5e94\u8be5\u5728\u7279\u6b8a\u7684\u60c5\u51b5\u4e0b\uff08\u6bd4\u5982\u6d4b\u8bd5\uff09\u8c03\u7528\u6b64\u65b9\u6cd5\uff0c\u4ee5\u83b7\u5f97\u4e00\u4e2a\u964d\u7ea7\u7684\u670d\u52a1\u3002 \"\"\" _dir = os . path . dirname ( __file__ ) file = os . path . join ( _dir , \"..\" , \"config\" , \"calendar.json\" ) with open ( file , \"r\" ) as f : data = json . load ( f ) for k , v in data . items (): setattr ( cls , k , np . array ( v ))","title":"service_degrade()"},{"location":"api/timeframe/#omicron.models.timeframe.TimeFrame.shift","text":"\u5c06\u6307\u5b9a\u7684moment\u79fb\u52a8N\u4e2a frame_type \u4f4d\u7f6e\u3002 \u5f53N\u4e3a\u8d1f\u6570\u65f6\uff0c\u610f\u5473\u7740\u5411\u524d\u79fb\u52a8\uff1b\u5f53N\u4e3a\u6b63\u6570\u65f6\uff0c\u610f\u5473\u7740\u5411\u540e\u79fb\u52a8\u3002\u5982\u679cn\u4e3a\u96f6\uff0c\u610f\u5473\u7740\u79fb\u52a8\u5230\u6700\u63a5\u8fd1 \u7684\u4e00\u4e2a\u5df2\u7ed3\u675f\u7684frame\u3002 \u5982\u679cmoment\u6ca1\u6709\u5bf9\u9f50\u5230frame_type\u5bf9\u5e94\u7684\u65f6\u95f4\uff0c\u5c06\u9996\u5148\u8fdb\u884c\u5bf9\u9f50\u3002 See also: day_shift week_shift month_shift Examples: >>> TimeFrame . shift ( datetime . date ( 2020 , 1 , 3 ), 1 , FrameType . DAY ) datetime . date ( 2020 , 1 , 6 ) >>> TimeFrame . shift ( datetime . datetime ( 2020 , 1 , 6 , 11 ), 1 , FrameType . MIN30 ) datetime . 
datetime ( 2020 , 1 , 6 , 11 , 30 ) Parameters: Name Type Description Default moment Union[Arrow, datetime.date, datetime.datetime] required n int required frame_type FrameType required Returns: Type Description Union[datetime.date, datetime.datetime] \u79fb\u4f4d\u540e\u7684Frame Source code in omicron/models/timeframe.py @classmethod def shift ( cls , moment : Union [ Arrow , datetime . date , datetime . datetime ], n : int , frame_type : FrameType , ) -> Union [ datetime . date , datetime . datetime ]: \"\"\"\u5c06\u6307\u5b9a\u7684moment\u79fb\u52a8N\u4e2a`frame_type`\u4f4d\u7f6e\u3002 \u5f53N\u4e3a\u8d1f\u6570\u65f6\uff0c\u610f\u5473\u7740\u5411\u524d\u79fb\u52a8\uff1b\u5f53N\u4e3a\u6b63\u6570\u65f6\uff0c\u610f\u5473\u7740\u5411\u540e\u79fb\u52a8\u3002\u5982\u679cn\u4e3a\u96f6\uff0c\u610f\u5473\u7740\u79fb\u52a8\u5230\u6700\u63a5\u8fd1 \u7684\u4e00\u4e2a\u5df2\u7ed3\u675f\u7684frame\u3002 \u5982\u679cmoment\u6ca1\u6709\u5bf9\u9f50\u5230frame_type\u5bf9\u5e94\u7684\u65f6\u95f4\uff0c\u5c06\u9996\u5148\u8fdb\u884c\u5bf9\u9f50\u3002 See also: - [day_shift][omicron.models.timeframe.TimeFrame.day_shift] - [week_shift][omicron.models.timeframe.TimeFrame.week_shift] - [month_shift][omicron.models.timeframe.TimeFrame.month_shift] Examples: >>> TimeFrame.shift(datetime.date(2020, 1, 3), 1, FrameType.DAY) datetime.date(2020, 1, 6) >>> TimeFrame.shift(datetime.datetime(2020, 1, 6, 11), 1, FrameType.MIN30) datetime.datetime(2020, 1, 6, 11, 30) Args: moment: n: frame_type: Returns: \u79fb\u4f4d\u540e\u7684Frame \"\"\" if frame_type == FrameType . DAY : return cls . day_shift ( moment , n ) elif frame_type == FrameType . WEEK : return cls . week_shift ( moment , n ) elif frame_type == FrameType . MONTH : return cls . month_shift ( moment , n ) elif frame_type in [ FrameType . MIN1 , FrameType . MIN5 , FrameType . MIN15 , FrameType . MIN30 , FrameType . MIN60 , ]: tm = moment . hour * 60 + moment . minute new_tick_pos = cls . ticks [ frame_type ] . index ( tm ) + n days = new_tick_pos // len ( cls . ticks [ frame_type ]) min_part = new_tick_pos % len ( cls . ticks [ frame_type ]) date_part = cls . day_shift ( moment . date (), days ) minutes = cls . ticks [ frame_type ][ min_part ] h , m = minutes // 60 , minutes % 60 return datetime . datetime ( date_part . year , date_part . month , date_part . day , h , m , tzinfo = moment . tzinfo , ) else : # pragma: no cover raise ValueError ( f \" { frame_type } is not supported.\" )","title":"shift()"},{"location":"api/timeframe/#omicron.models.timeframe.TimeFrame.time2int","text":"\u5c06\u65f6\u95f4\u7c7b\u578b\u8f6c\u6362\u4e3a\u6574\u6570\u7c7b\u578b tm\u53ef\u4ee5\u662fArrow\u7c7b\u578b\uff0c\u4e5f\u53ef\u4ee5\u662fdatetime.datetime\u6216\u8005\u4efb\u4f55\u5176\u5b83\u7c7b\u578b\uff0c\u53ea\u8981\u5b83\u6709year,month...\u7b49 \u5c5e\u6027 Examples: >>> TimeFrame . time2int ( datetime . datetime ( 2020 , 5 , 1 , 15 )) 202005011500 Parameters: Name Type Description Default tm Union[datetime.datetime, Arrow] required Returns: Type Description int \u8f6c\u6362\u540e\u7684\u6574\u6570\uff0c\u6bd4\u59822020050115 Source code in omicron/models/timeframe.py @classmethod def time2int ( cls , tm : Union [ datetime . 
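Note: the shift() examples above can be reproduced offline by first loading the calendar bundled with omicron through service_degrade(), documented earlier on this page. This is a test/offline sketch only; a production setup would initialize the calendar against a running Omega service, and the expected values assume the bundled calendar covers January 2020.

```python
import datetime

from coretypes.types import FrameType
from omicron.models.timeframe import TimeFrame

# Load the calendar shipped with the package (degraded service, fine for tests).
TimeFrame.service_degrade()

print(TimeFrame.shift(datetime.date(2020, 1, 3), 1, FrameType.DAY))
# datetime.date(2020, 1, 6)
print(TimeFrame.shift(datetime.datetime(2020, 1, 6, 11), 1, FrameType.MIN30))
# datetime.datetime(2020, 1, 6, 11, 30)
```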
datetime , Arrow ]) -> int : \"\"\"\u5c06\u65f6\u95f4\u7c7b\u578b\u8f6c\u6362\u4e3a\u6574\u6570\u7c7b\u578b tm\u53ef\u4ee5\u662fArrow\u7c7b\u578b\uff0c\u4e5f\u53ef\u4ee5\u662fdatetime.datetime\u6216\u8005\u4efb\u4f55\u5176\u5b83\u7c7b\u578b\uff0c\u53ea\u8981\u5b83\u6709year,month...\u7b49 \u5c5e\u6027 Examples: >>> TimeFrame.time2int(datetime.datetime(2020, 5, 1, 15)) 202005011500 Args: tm: Returns: \u8f6c\u6362\u540e\u7684\u6574\u6570\uff0c\u6bd4\u59822020050115 \"\"\" return int ( f \" { tm . year : 04 }{ tm . month : 02 }{ tm . day : 02 }{ tm . hour : 02 }{ tm . minute : 02 } \" )","title":"time2int()"},{"location":"api/timeframe/#omicron.models.timeframe.TimeFrame.week_shift","text":"\u5bf9\u6307\u5b9a\u65e5\u671f\u6309\u5468\u7ebf\u5e27\u8fdb\u884c\u524d\u540e\u79fb\u4f4d\u64cd\u4f5c \u53c2\u8003 omicron.models.timeframe.TimeFrame.day_shift Examples: >>> TimeFrame . week_frames = np . array ([ 20200103 , 20200110 , 20200117 , 20200123 , 20200207 , 20200214 ]) >>> moment = arrow . get ( '2020-1-21' ) . date () >>> TimeFrame . week_shift ( moment , 1 ) datetime . date ( 2020 , 1 , 23 ) >>> TimeFrame . week_shift ( moment , 0 ) datetime . date ( 2020 , 1 , 17 ) >>> TimeFrame . week_shift ( moment , - 1 ) datetime . date ( 2020 , 1 , 10 ) Returns: Type Description datetime.date \u79fb\u4f4d\u540e\u7684\u65e5\u671f Source code in omicron/models/timeframe.py @classmethod def week_shift ( cls , start : datetime . date , offset : int ) -> datetime . date : \"\"\"\u5bf9\u6307\u5b9a\u65e5\u671f\u6309\u5468\u7ebf\u5e27\u8fdb\u884c\u524d\u540e\u79fb\u4f4d\u64cd\u4f5c \u53c2\u8003 [omicron.models.timeframe.TimeFrame.day_shift][] Examples: >>> TimeFrame.week_frames = np.array([20200103, 20200110, 20200117, 20200123,20200207, 20200214]) >>> moment = arrow.get('2020-1-21').date() >>> TimeFrame.week_shift(moment, 1) datetime.date(2020, 1, 23) >>> TimeFrame.week_shift(moment, 0) datetime.date(2020, 1, 17) >>> TimeFrame.week_shift(moment, -1) datetime.date(2020, 1, 10) Returns: \u79fb\u4f4d\u540e\u7684\u65e5\u671f \"\"\" start = cls . date2int ( start ) return cls . int2date ( ext . shift ( cls . week_frames , start , offset )) rendering: heading_level: 1","title":"week_shift()"},{"location":"api/triggers/","text":"\u5728apscheduler.triggers\u7684\u57fa\u7840\u4e0a\u63d0\u4f9b\u4e86FrameTrigger\u548cIntervalTrigger\uff0c\u4f7f\u5f97\u5b83\u4eec\u53ea\u5728\u4ea4\u6613\u65e5\uff08\u6216\u8005 \u57fa\u4e8e\u4ea4\u6613\u65e5+\u5ef6\u65f6\uff09\u65f6\u6fc0\u53d1\u3002 FrameTrigger ( BaseTrigger ) \u00b6 A cron like trigger fires on each valid Frame Source code in omicron/core/triggers.py class FrameTrigger ( BaseTrigger ): \"\"\" A cron like trigger fires on each valid Frame \"\"\" def __init__ ( self , frame_type : Union [ str , FrameType ], jitter : str = None ): \"\"\"\u6784\u9020\u51fd\u6570 jitter\u7684\u683c\u5f0f\u7528\u6b63\u5219\u5f0f\u8868\u8fbe\u4e3a`r\"([-]?)(\\\\d+)([mshd])\"`\uff0c\u5176\u4e2d\u7b2c\u4e00\u7ec4\u4e3a\u7b26\u53f7\uff0c'-'\u8868\u793a\u63d0\u524d\uff1b \u7b2c\u4e8c\u7ec4\u4e3a\u6570\u5b57\uff0c\u7b2c\u4e09\u7ec4\u4e3a\u5355\u4f4d\uff0c\u53ef\u4ee5\u4e3a`m`(\u5206\u949f), `s`(\u79d2), `h`\uff08\u5c0f\u65f6\uff09,`d`(\u5929)\u3002 \u4e0b\u9762\u7684\u793a\u4f8b\u6784\u9020\u4e86\u4e00\u4e2a\u53ea\u5728\u4ea4\u6613\u65e5\uff0c\u6bcf30\u5206\u949f\u89e6\u53d1\u4e00\u6b21\uff0c\u6bcf\u6b21\u63d0\u524d15\u79d2\u89e6\u7684trigger\u3002\u5373\u5b83\u7684\u89e6\u53d1\u65f6 \u95f4\u662f\u6bcf\u4e2a\u4ea4\u6613\u65e5\u768409:29:45, 09:59:45, ... 
Examples: >>> FrameTrigger(FrameType.MIN30, '-15s') # doctest: +ELLIPSIS Args: frame_type: jitter: \u5355\u4f4d\u79d2\u3002\u5176\u4e2doffset\u5fc5\u987b\u5728\u4e00\u4e2aFrameType\u7684\u957f\u5ea6\u4ee5\u5185 \"\"\" self . frame_type = FrameType ( frame_type ) if jitter is None : _jitter = 0 else : matched = re . match ( r \"([-]?)(\\d+)([mshd])\" , jitter ) if matched is None : # pragma: no cover raise ValueError ( \"malformed. jitter should be [-](number)(unit), \" \"for example, -30m, or 30s\" ) sign , num , unit = matched . groups () num = int ( num ) if unit . lower () == \"m\" : _jitter = 60 * num elif unit . lower () == \"s\" : _jitter = num elif unit . lower () == \"h\" : _jitter = 3600 * num elif unit . lower () == \"d\" : _jitter = 3600 * 24 * num else : # pragma: no cover raise ValueError ( \"bad time unit. only s,h,m,d is acceptable\" ) if sign == \"-\" : _jitter = - _jitter self . jitter = datetime . timedelta ( seconds = _jitter ) if ( frame_type == FrameType . MIN1 and abs ( _jitter ) >= 60 or frame_type == FrameType . MIN5 and abs ( _jitter ) >= 300 or frame_type == FrameType . MIN15 and abs ( _jitter ) >= 900 or frame_type == FrameType . MIN30 and abs ( _jitter ) >= 1800 or frame_type == FrameType . MIN60 and abs ( _jitter ) >= 3600 or frame_type == FrameType . DAY and abs ( _jitter ) >= 24 * 3600 # it's still not allowed if offset > week, month, etc. Would anybody # really specify an offset longer than that? ): raise ValueError ( \"offset must be less than frame length\" ) def __str__ ( self ): return f \" { self . __class__ . __name__ } : { self . frame_type . value } : { self . jitter } \" def get_next_fire_time ( self , previous_fire_time : Union [ datetime . date , datetime . datetime ], now : Union [ datetime . date , datetime . datetime ], ): \"\"\"\"\"\" ft = self . frame_type # `now` is timezone aware, while ceiling isn't now = now . replace ( tzinfo = None ) next_tick = now next_frame = TimeFrame . ceiling ( now , ft ) while next_tick <= now : if ft in TimeFrame . day_level_frames : next_tick = TimeFrame . combine_time ( next_frame , 15 ) + self . jitter else : next_tick = next_frame + self . jitter if next_tick > now : tz = tzlocal . get_localzone () return next_tick . astimezone ( tz ) else : next_frame = TimeFrame . shift ( next_frame , 1 , ft ) __init__ ( self , frame_type , jitter = None ) special \u00b6 \u6784\u9020\u51fd\u6570 jitter\u7684\u683c\u5f0f\u7528\u6b63\u5219\u5f0f\u8868\u8fbe\u4e3a r\"([-]?)(\\d+)([mshd])\" \uff0c\u5176\u4e2d\u7b2c\u4e00\u7ec4\u4e3a\u7b26\u53f7\uff0c'-'\u8868\u793a\u63d0\u524d\uff1b \u7b2c\u4e8c\u7ec4\u4e3a\u6570\u5b57\uff0c\u7b2c\u4e09\u7ec4\u4e3a\u5355\u4f4d\uff0c\u53ef\u4ee5\u4e3a m (\u5206\u949f), s (\u79d2), h \uff08\u5c0f\u65f6\uff09, d (\u5929)\u3002 \u4e0b\u9762\u7684\u793a\u4f8b\u6784\u9020\u4e86\u4e00\u4e2a\u53ea\u5728\u4ea4\u6613\u65e5\uff0c\u6bcf30\u5206\u949f\u89e6\u53d1\u4e00\u6b21\uff0c\u6bcf\u6b21\u63d0\u524d15\u79d2\u89e6\u7684trigger\u3002\u5373\u5b83\u7684\u89e6\u53d1\u65f6 \u95f4\u662f\u6bcf\u4e2a\u4ea4\u6613\u65e5\u768409:29:45, 09:59:45, ... Examples: >>> FrameTrigger ( FrameType . MIN30 , '-15s' ) < omicron . core . triggers . 
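Note: a minimal sketch of how a FrameTrigger would typically be registered with APScheduler. It assumes the trading calendar has already been initialized (for example via omicron's init or TimeFrame.service_degrade()), that an asyncio event loop is running, and that on_bar_close is a hypothetical callback.

```python
from apscheduler.schedulers.asyncio import AsyncIOScheduler
from coretypes.types import FrameType
from omicron.core.triggers import FrameTrigger

async def on_bar_close():
    ...  # hypothetical: fetch bars, run strategy logic, etc.

scheduler = AsyncIOScheduler()
# Fires only on trading days, once per 30-minute frame, 15 seconds early
# (09:29:45, 09:59:45, ...), per the jitter format described above.
scheduler.add_job(on_bar_close, FrameTrigger(FrameType.MIN30, "-15s"))
scheduler.start()  # requires a running asyncio event loop
```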
FrameTrigger object at 0 x ...> Parameters: Name Type Description Default frame_type Union[str, coretypes.types.FrameType] required jitter str \u5355\u4f4d\u79d2\u3002\u5176\u4e2doffset\u5fc5\u987b\u5728\u4e00\u4e2aFrameType\u7684\u957f\u5ea6\u4ee5\u5185 None Source code in omicron/core/triggers.py def __init__ ( self , frame_type : Union [ str , FrameType ], jitter : str = None ): \"\"\"\u6784\u9020\u51fd\u6570 jitter\u7684\u683c\u5f0f\u7528\u6b63\u5219\u5f0f\u8868\u8fbe\u4e3a`r\"([-]?)(\\\\d+)([mshd])\"`\uff0c\u5176\u4e2d\u7b2c\u4e00\u7ec4\u4e3a\u7b26\u53f7\uff0c'-'\u8868\u793a\u63d0\u524d\uff1b \u7b2c\u4e8c\u7ec4\u4e3a\u6570\u5b57\uff0c\u7b2c\u4e09\u7ec4\u4e3a\u5355\u4f4d\uff0c\u53ef\u4ee5\u4e3a`m`(\u5206\u949f), `s`(\u79d2), `h`\uff08\u5c0f\u65f6\uff09,`d`(\u5929)\u3002 \u4e0b\u9762\u7684\u793a\u4f8b\u6784\u9020\u4e86\u4e00\u4e2a\u53ea\u5728\u4ea4\u6613\u65e5\uff0c\u6bcf30\u5206\u949f\u89e6\u53d1\u4e00\u6b21\uff0c\u6bcf\u6b21\u63d0\u524d15\u79d2\u89e6\u7684trigger\u3002\u5373\u5b83\u7684\u89e6\u53d1\u65f6 \u95f4\u662f\u6bcf\u4e2a\u4ea4\u6613\u65e5\u768409:29:45, 09:59:45, ... Examples: >>> FrameTrigger(FrameType.MIN30, '-15s') # doctest: +ELLIPSIS Args: frame_type: jitter: \u5355\u4f4d\u79d2\u3002\u5176\u4e2doffset\u5fc5\u987b\u5728\u4e00\u4e2aFrameType\u7684\u957f\u5ea6\u4ee5\u5185 \"\"\" self . frame_type = FrameType ( frame_type ) if jitter is None : _jitter = 0 else : matched = re . match ( r \"([-]?)(\\d+)([mshd])\" , jitter ) if matched is None : # pragma: no cover raise ValueError ( \"malformed. jitter should be [-](number)(unit), \" \"for example, -30m, or 30s\" ) sign , num , unit = matched . groups () num = int ( num ) if unit . lower () == \"m\" : _jitter = 60 * num elif unit . lower () == \"s\" : _jitter = num elif unit . lower () == \"h\" : _jitter = 3600 * num elif unit . lower () == \"d\" : _jitter = 3600 * 24 * num else : # pragma: no cover raise ValueError ( \"bad time unit. only s,h,m,d is acceptable\" ) if sign == \"-\" : _jitter = - _jitter self . jitter = datetime . timedelta ( seconds = _jitter ) if ( frame_type == FrameType . MIN1 and abs ( _jitter ) >= 60 or frame_type == FrameType . MIN5 and abs ( _jitter ) >= 300 or frame_type == FrameType . MIN15 and abs ( _jitter ) >= 900 or frame_type == FrameType . MIN30 and abs ( _jitter ) >= 1800 or frame_type == FrameType . MIN60 and abs ( _jitter ) >= 3600 or frame_type == FrameType . DAY and abs ( _jitter ) >= 24 * 3600 # it's still not allowed if offset > week, month, etc. Would anybody # really specify an offset longer than that? ): raise ValueError ( \"offset must be less than frame length\" ) TradeTimeIntervalTrigger ( BaseTrigger ) \u00b6 \u53ea\u5728\u4ea4\u6613\u65f6\u95f4\u89e6\u53d1\u7684\u56fa\u5b9a\u95f4\u9694\u7684trigger Source code in omicron/core/triggers.py class TradeTimeIntervalTrigger ( BaseTrigger ): \"\"\"\u53ea\u5728\u4ea4\u6613\u65f6\u95f4\u89e6\u53d1\u7684\u56fa\u5b9a\u95f4\u9694\u7684trigger\"\"\" def __init__ ( self , interval : str ): \"\"\"\u6784\u9020\u51fd\u6570 interval\u7684\u683c\u5f0f\u7528\u6b63\u5219\u8868\u8fbe\u5f0f\u8868\u793a\u4e3a `r\"(\\\\d+)([mshd])\"` \u3002\u5176\u4e2d\u7b2c\u4e00\u7ec4\u4e3a\u6570\u5b57\uff0c\u7b2c\u4e8c\u7ec4\u4e3a\u5355\u4f4d\u3002\u6709\u6548\u7684 `interval`\u5982 1 \uff0c\u8868\u793a\u6bcf1\u5c0f\u65f6\u89e6\u53d1\u4e00\u6b21\uff0c\u5219\u8be5\u89e6\u53d1\u5668\u5c06\u5728\u4ea4\u6613\u65e5\u768410:30, 11:30, 14:00\u548c 15\uff1a00\u5404\u89e6\u53d1\u4e00\u6b21 Args: interval : [description] Raises: ValueError: [description] \"\"\" matched = re . 
match ( r \"(\\d+)([mshd])\" , interval ) if matched is None : raise ValueError ( f \"malform interval { interval } \" ) interval , unit = matched . groups () interval = int ( interval ) unit = unit . lower () if unit == \"s\" : self . interval = datetime . timedelta ( seconds = interval ) elif unit == \"m\" : self . interval = datetime . timedelta ( minutes = interval ) elif unit == \"h\" : self . interval = datetime . timedelta ( hours = interval ) elif unit == \"d\" : self . interval = datetime . timedelta ( days = interval ) else : self . interval = datetime . timedelta ( seconds = interval ) def __str__ ( self ): return f \" { self . __class__ . __name__ } : { self . interval . seconds } \" def get_next_fire_time ( self , previous_fire_time : Optional [ datetime . datetime ], now : Optional [ datetime . datetime ], ): \"\"\"\"\"\" if previous_fire_time is not None : fire_time = previous_fire_time + self . interval else : fire_time = now if TimeFrame . date2int ( fire_time . date ()) not in TimeFrame . day_frames : ft = TimeFrame . day_shift ( now , 1 ) fire_time = datetime . datetime ( ft . year , ft . month , ft . day , 9 , 30 , tzinfo = fire_time . tzinfo ) return fire_time minutes = fire_time . hour * 60 + fire_time . minute if minutes < 570 : fire_time = fire_time . replace ( hour = 9 , minute = 30 , second = 0 , microsecond = 0 ) elif 690 < minutes < 780 : fire_time = fire_time . replace ( hour = 13 , minute = 0 , second = 0 , microsecond = 0 ) elif minutes > 900 : ft = TimeFrame . day_shift ( fire_time , 1 ) fire_time = datetime . datetime ( ft . year , ft . month , ft . day , 9 , 30 , tzinfo = fire_time . tzinfo ) return fire_time __init__ ( self , interval ) special \u00b6 \u6784\u9020\u51fd\u6570 interval\u7684\u683c\u5f0f\u7528\u6b63\u5219\u8868\u8fbe\u5f0f\u8868\u793a\u4e3a r\"(\\d+)([mshd])\" \u3002\u5176\u4e2d\u7b2c\u4e00\u7ec4\u4e3a\u6570\u5b57\uff0c\u7b2c\u4e8c\u7ec4\u4e3a\u5355\u4f4d\u3002\u6709\u6548\u7684 interval \u5982 1 \uff0c\u8868\u793a\u6bcf1\u5c0f\u65f6\u89e6\u53d1\u4e00\u6b21\uff0c\u5219\u8be5\u89e6\u53d1\u5668\u5c06\u5728\u4ea4\u6613\u65e5\u768410:30, 11:30, 14:00\u548c 15\uff1a00\u5404\u89e6\u53d1\u4e00\u6b21 Parameters: Name Type Description Default interval [description] required Exceptions: Type Description ValueError [description] Source code in omicron/core/triggers.py def __init__ ( self , interval : str ): \"\"\"\u6784\u9020\u51fd\u6570 interval\u7684\u683c\u5f0f\u7528\u6b63\u5219\u8868\u8fbe\u5f0f\u8868\u793a\u4e3a `r\"(\\\\d+)([mshd])\"` \u3002\u5176\u4e2d\u7b2c\u4e00\u7ec4\u4e3a\u6570\u5b57\uff0c\u7b2c\u4e8c\u7ec4\u4e3a\u5355\u4f4d\u3002\u6709\u6548\u7684 `interval`\u5982 1 \uff0c\u8868\u793a\u6bcf1\u5c0f\u65f6\u89e6\u53d1\u4e00\u6b21\uff0c\u5219\u8be5\u89e6\u53d1\u5668\u5c06\u5728\u4ea4\u6613\u65e5\u768410:30, 11:30, 14:00\u548c 15\uff1a00\u5404\u89e6\u53d1\u4e00\u6b21 Args: interval : [description] Raises: ValueError: [description] \"\"\" matched = re . match ( r \"(\\d+)([mshd])\" , interval ) if matched is None : raise ValueError ( f \"malform interval { interval } \" ) interval , unit = matched . groups () interval = int ( interval ) unit = unit . lower () if unit == \"s\" : self . interval = datetime . timedelta ( seconds = interval ) elif unit == \"m\" : self . interval = datetime . timedelta ( minutes = interval ) elif unit == \"h\" : self . interval = datetime . timedelta ( hours = interval ) elif unit == \"d\" : self . interval = datetime . timedelta ( days = interval ) else : self . interval = datetime . 
timedelta ( seconds = interval )","title":"Triggers"},{"location":"api/triggers/#omicron.core.triggers.FrameTrigger","text":"A cron like trigger fires on each valid Frame Source code in omicron/core/triggers.py class FrameTrigger ( BaseTrigger ): \"\"\" A cron like trigger fires on each valid Frame \"\"\" def __init__ ( self , frame_type : Union [ str , FrameType ], jitter : str = None ): \"\"\"\u6784\u9020\u51fd\u6570 jitter\u7684\u683c\u5f0f\u7528\u6b63\u5219\u5f0f\u8868\u8fbe\u4e3a`r\"([-]?)(\\\\d+)([mshd])\"`\uff0c\u5176\u4e2d\u7b2c\u4e00\u7ec4\u4e3a\u7b26\u53f7\uff0c'-'\u8868\u793a\u63d0\u524d\uff1b \u7b2c\u4e8c\u7ec4\u4e3a\u6570\u5b57\uff0c\u7b2c\u4e09\u7ec4\u4e3a\u5355\u4f4d\uff0c\u53ef\u4ee5\u4e3a`m`(\u5206\u949f), `s`(\u79d2), `h`\uff08\u5c0f\u65f6\uff09,`d`(\u5929)\u3002 \u4e0b\u9762\u7684\u793a\u4f8b\u6784\u9020\u4e86\u4e00\u4e2a\u53ea\u5728\u4ea4\u6613\u65e5\uff0c\u6bcf30\u5206\u949f\u89e6\u53d1\u4e00\u6b21\uff0c\u6bcf\u6b21\u63d0\u524d15\u79d2\u89e6\u7684trigger\u3002\u5373\u5b83\u7684\u89e6\u53d1\u65f6 \u95f4\u662f\u6bcf\u4e2a\u4ea4\u6613\u65e5\u768409:29:45, 09:59:45, ... Examples: >>> FrameTrigger(FrameType.MIN30, '-15s') # doctest: +ELLIPSIS Args: frame_type: jitter: \u5355\u4f4d\u79d2\u3002\u5176\u4e2doffset\u5fc5\u987b\u5728\u4e00\u4e2aFrameType\u7684\u957f\u5ea6\u4ee5\u5185 \"\"\" self . frame_type = FrameType ( frame_type ) if jitter is None : _jitter = 0 else : matched = re . match ( r \"([-]?)(\\d+)([mshd])\" , jitter ) if matched is None : # pragma: no cover raise ValueError ( \"malformed. jitter should be [-](number)(unit), \" \"for example, -30m, or 30s\" ) sign , num , unit = matched . groups () num = int ( num ) if unit . lower () == \"m\" : _jitter = 60 * num elif unit . lower () == \"s\" : _jitter = num elif unit . lower () == \"h\" : _jitter = 3600 * num elif unit . lower () == \"d\" : _jitter = 3600 * 24 * num else : # pragma: no cover raise ValueError ( \"bad time unit. only s,h,m,d is acceptable\" ) if sign == \"-\" : _jitter = - _jitter self . jitter = datetime . timedelta ( seconds = _jitter ) if ( frame_type == FrameType . MIN1 and abs ( _jitter ) >= 60 or frame_type == FrameType . MIN5 and abs ( _jitter ) >= 300 or frame_type == FrameType . MIN15 and abs ( _jitter ) >= 900 or frame_type == FrameType . MIN30 and abs ( _jitter ) >= 1800 or frame_type == FrameType . MIN60 and abs ( _jitter ) >= 3600 or frame_type == FrameType . DAY and abs ( _jitter ) >= 24 * 3600 # it's still not allowed if offset > week, month, etc. Would anybody # really specify an offset longer than that? ): raise ValueError ( \"offset must be less than frame length\" ) def __str__ ( self ): return f \" { self . __class__ . __name__ } : { self . frame_type . value } : { self . jitter } \" def get_next_fire_time ( self , previous_fire_time : Union [ datetime . date , datetime . datetime ], now : Union [ datetime . date , datetime . datetime ], ): \"\"\"\"\"\" ft = self . frame_type # `now` is timezone aware, while ceiling isn't now = now . replace ( tzinfo = None ) next_tick = now next_frame = TimeFrame . ceiling ( now , ft ) while next_tick <= now : if ft in TimeFrame . day_level_frames : next_tick = TimeFrame . combine_time ( next_frame , 15 ) + self . jitter else : next_tick = next_frame + self . jitter if next_tick > now : tz = tzlocal . get_localzone () return next_tick . astimezone ( tz ) else : next_frame = TimeFrame . 
shift ( next_frame , 1 , ft )","title":"FrameTrigger"},{"location":"api/triggers/#omicron.core.triggers.FrameTrigger.__init__","text":"\u6784\u9020\u51fd\u6570 jitter\u7684\u683c\u5f0f\u7528\u6b63\u5219\u5f0f\u8868\u8fbe\u4e3a r\"([-]?)(\\d+)([mshd])\" \uff0c\u5176\u4e2d\u7b2c\u4e00\u7ec4\u4e3a\u7b26\u53f7\uff0c'-'\u8868\u793a\u63d0\u524d\uff1b \u7b2c\u4e8c\u7ec4\u4e3a\u6570\u5b57\uff0c\u7b2c\u4e09\u7ec4\u4e3a\u5355\u4f4d\uff0c\u53ef\u4ee5\u4e3a m (\u5206\u949f), s (\u79d2), h \uff08\u5c0f\u65f6\uff09, d (\u5929)\u3002 \u4e0b\u9762\u7684\u793a\u4f8b\u6784\u9020\u4e86\u4e00\u4e2a\u53ea\u5728\u4ea4\u6613\u65e5\uff0c\u6bcf30\u5206\u949f\u89e6\u53d1\u4e00\u6b21\uff0c\u6bcf\u6b21\u63d0\u524d15\u79d2\u89e6\u7684trigger\u3002\u5373\u5b83\u7684\u89e6\u53d1\u65f6 \u95f4\u662f\u6bcf\u4e2a\u4ea4\u6613\u65e5\u768409:29:45, 09:59:45, ... Examples: >>> FrameTrigger ( FrameType . MIN30 , '-15s' ) < omicron . core . triggers . FrameTrigger object at 0 x ...> Parameters: Name Type Description Default frame_type Union[str, coretypes.types.FrameType] required jitter str \u5355\u4f4d\u79d2\u3002\u5176\u4e2doffset\u5fc5\u987b\u5728\u4e00\u4e2aFrameType\u7684\u957f\u5ea6\u4ee5\u5185 None Source code in omicron/core/triggers.py def __init__ ( self , frame_type : Union [ str , FrameType ], jitter : str = None ): \"\"\"\u6784\u9020\u51fd\u6570 jitter\u7684\u683c\u5f0f\u7528\u6b63\u5219\u5f0f\u8868\u8fbe\u4e3a`r\"([-]?)(\\\\d+)([mshd])\"`\uff0c\u5176\u4e2d\u7b2c\u4e00\u7ec4\u4e3a\u7b26\u53f7\uff0c'-'\u8868\u793a\u63d0\u524d\uff1b \u7b2c\u4e8c\u7ec4\u4e3a\u6570\u5b57\uff0c\u7b2c\u4e09\u7ec4\u4e3a\u5355\u4f4d\uff0c\u53ef\u4ee5\u4e3a`m`(\u5206\u949f), `s`(\u79d2), `h`\uff08\u5c0f\u65f6\uff09,`d`(\u5929)\u3002 \u4e0b\u9762\u7684\u793a\u4f8b\u6784\u9020\u4e86\u4e00\u4e2a\u53ea\u5728\u4ea4\u6613\u65e5\uff0c\u6bcf30\u5206\u949f\u89e6\u53d1\u4e00\u6b21\uff0c\u6bcf\u6b21\u63d0\u524d15\u79d2\u89e6\u7684trigger\u3002\u5373\u5b83\u7684\u89e6\u53d1\u65f6 \u95f4\u662f\u6bcf\u4e2a\u4ea4\u6613\u65e5\u768409:29:45, 09:59:45, ... Examples: >>> FrameTrigger(FrameType.MIN30, '-15s') # doctest: +ELLIPSIS Args: frame_type: jitter: \u5355\u4f4d\u79d2\u3002\u5176\u4e2doffset\u5fc5\u987b\u5728\u4e00\u4e2aFrameType\u7684\u957f\u5ea6\u4ee5\u5185 \"\"\" self . frame_type = FrameType ( frame_type ) if jitter is None : _jitter = 0 else : matched = re . match ( r \"([-]?)(\\d+)([mshd])\" , jitter ) if matched is None : # pragma: no cover raise ValueError ( \"malformed. jitter should be [-](number)(unit), \" \"for example, -30m, or 30s\" ) sign , num , unit = matched . groups () num = int ( num ) if unit . lower () == \"m\" : _jitter = 60 * num elif unit . lower () == \"s\" : _jitter = num elif unit . lower () == \"h\" : _jitter = 3600 * num elif unit . lower () == \"d\" : _jitter = 3600 * 24 * num else : # pragma: no cover raise ValueError ( \"bad time unit. only s,h,m,d is acceptable\" ) if sign == \"-\" : _jitter = - _jitter self . jitter = datetime . timedelta ( seconds = _jitter ) if ( frame_type == FrameType . MIN1 and abs ( _jitter ) >= 60 or frame_type == FrameType . MIN5 and abs ( _jitter ) >= 300 or frame_type == FrameType . MIN15 and abs ( _jitter ) >= 900 or frame_type == FrameType . MIN30 and abs ( _jitter ) >= 1800 or frame_type == FrameType . MIN60 and abs ( _jitter ) >= 3600 or frame_type == FrameType . DAY and abs ( _jitter ) >= 24 * 3600 # it's still not allowed if offset > week, month, etc. Would anybody # really specify an offset longer than that? 
): raise ValueError ( \"offset must be less than frame length\" )","title":"__init__()"},{"location":"api/triggers/#omicron.core.triggers.TradeTimeIntervalTrigger","text":"\u53ea\u5728\u4ea4\u6613\u65f6\u95f4\u89e6\u53d1\u7684\u56fa\u5b9a\u95f4\u9694\u7684trigger Source code in omicron/core/triggers.py class TradeTimeIntervalTrigger ( BaseTrigger ): \"\"\"\u53ea\u5728\u4ea4\u6613\u65f6\u95f4\u89e6\u53d1\u7684\u56fa\u5b9a\u95f4\u9694\u7684trigger\"\"\" def __init__ ( self , interval : str ): \"\"\"\u6784\u9020\u51fd\u6570 interval\u7684\u683c\u5f0f\u7528\u6b63\u5219\u8868\u8fbe\u5f0f\u8868\u793a\u4e3a `r\"(\\\\d+)([mshd])\"` \u3002\u5176\u4e2d\u7b2c\u4e00\u7ec4\u4e3a\u6570\u5b57\uff0c\u7b2c\u4e8c\u7ec4\u4e3a\u5355\u4f4d\u3002\u6709\u6548\u7684 `interval`\u5982 1 \uff0c\u8868\u793a\u6bcf1\u5c0f\u65f6\u89e6\u53d1\u4e00\u6b21\uff0c\u5219\u8be5\u89e6\u53d1\u5668\u5c06\u5728\u4ea4\u6613\u65e5\u768410:30, 11:30, 14:00\u548c 15\uff1a00\u5404\u89e6\u53d1\u4e00\u6b21 Args: interval : [description] Raises: ValueError: [description] \"\"\" matched = re . match ( r \"(\\d+)([mshd])\" , interval ) if matched is None : raise ValueError ( f \"malform interval { interval } \" ) interval , unit = matched . groups () interval = int ( interval ) unit = unit . lower () if unit == \"s\" : self . interval = datetime . timedelta ( seconds = interval ) elif unit == \"m\" : self . interval = datetime . timedelta ( minutes = interval ) elif unit == \"h\" : self . interval = datetime . timedelta ( hours = interval ) elif unit == \"d\" : self . interval = datetime . timedelta ( days = interval ) else : self . interval = datetime . timedelta ( seconds = interval ) def __str__ ( self ): return f \" { self . __class__ . __name__ } : { self . interval . seconds } \" def get_next_fire_time ( self , previous_fire_time : Optional [ datetime . datetime ], now : Optional [ datetime . datetime ], ): \"\"\"\"\"\" if previous_fire_time is not None : fire_time = previous_fire_time + self . interval else : fire_time = now if TimeFrame . date2int ( fire_time . date ()) not in TimeFrame . day_frames : ft = TimeFrame . day_shift ( now , 1 ) fire_time = datetime . datetime ( ft . year , ft . month , ft . day , 9 , 30 , tzinfo = fire_time . tzinfo ) return fire_time minutes = fire_time . hour * 60 + fire_time . minute if minutes < 570 : fire_time = fire_time . replace ( hour = 9 , minute = 30 , second = 0 , microsecond = 0 ) elif 690 < minutes < 780 : fire_time = fire_time . replace ( hour = 13 , minute = 0 , second = 0 , microsecond = 0 ) elif minutes > 900 : ft = TimeFrame . day_shift ( fire_time , 1 ) fire_time = datetime . datetime ( ft . year , ft . month , ft . day , 9 , 30 , tzinfo = fire_time . 
tzinfo ) return fire_time","title":"TradeTimeIntervalTrigger"},{"location":"api/triggers/#omicron.core.triggers.TradeTimeIntervalTrigger.__init__","text":"\u6784\u9020\u51fd\u6570 interval\u7684\u683c\u5f0f\u7528\u6b63\u5219\u8868\u8fbe\u5f0f\u8868\u793a\u4e3a r\"(\\d+)([mshd])\" \u3002\u5176\u4e2d\u7b2c\u4e00\u7ec4\u4e3a\u6570\u5b57\uff0c\u7b2c\u4e8c\u7ec4\u4e3a\u5355\u4f4d\u3002\u6709\u6548\u7684 interval \u5982 1 \uff0c\u8868\u793a\u6bcf1\u5c0f\u65f6\u89e6\u53d1\u4e00\u6b21\uff0c\u5219\u8be5\u89e6\u53d1\u5668\u5c06\u5728\u4ea4\u6613\u65e5\u768410:30, 11:30, 14:00\u548c 15\uff1a00\u5404\u89e6\u53d1\u4e00\u6b21 Parameters: Name Type Description Default interval [description] required Exceptions: Type Description ValueError [description] Source code in omicron/core/triggers.py def __init__ ( self , interval : str ): \"\"\"\u6784\u9020\u51fd\u6570 interval\u7684\u683c\u5f0f\u7528\u6b63\u5219\u8868\u8fbe\u5f0f\u8868\u793a\u4e3a `r\"(\\\\d+)([mshd])\"` \u3002\u5176\u4e2d\u7b2c\u4e00\u7ec4\u4e3a\u6570\u5b57\uff0c\u7b2c\u4e8c\u7ec4\u4e3a\u5355\u4f4d\u3002\u6709\u6548\u7684 `interval`\u5982 1 \uff0c\u8868\u793a\u6bcf1\u5c0f\u65f6\u89e6\u53d1\u4e00\u6b21\uff0c\u5219\u8be5\u89e6\u53d1\u5668\u5c06\u5728\u4ea4\u6613\u65e5\u768410:30, 11:30, 14:00\u548c 15\uff1a00\u5404\u89e6\u53d1\u4e00\u6b21 Args: interval : [description] Raises: ValueError: [description] \"\"\" matched = re . match ( r \"(\\d+)([mshd])\" , interval ) if matched is None : raise ValueError ( f \"malform interval { interval } \" ) interval , unit = matched . groups () interval = int ( interval ) unit = unit . lower () if unit == \"s\" : self . interval = datetime . timedelta ( seconds = interval ) elif unit == \"m\" : self . interval = datetime . timedelta ( minutes = interval ) elif unit == \"h\" : self . interval = datetime . timedelta ( hours = interval ) elif unit == \"d\" : self . interval = datetime . timedelta ( days = interval ) else : self . interval = datetime . timedelta ( seconds = interval )","title":"__init__()"},{"location":"api/dal/flux/","text":"Flux - the query language builder for influxdb \u00b6 Helper functions for building flux query expression Source code in omicron/dal/influx/flux.py class Flux ( object ): \"\"\"Helper functions for building flux query expression\"\"\" EPOCH_START = datetime . datetime ( 1970 , 1 , 1 , 0 , 0 , 0 ) def __init__ ( self , auto_pivot = True , no_sys_cols = True ): \"\"\"\u521d\u59cb\u5316Flux\u5bf9\u8c61 Args: auto_pivot : \u662f\u5426\u81ea\u52a8\u5c06\u67e5\u8be2\u5217\u5b57\u6bb5\u7ec4\u88c5\u6210\u884c. Defaults to True. no_sys_cols: \u662f\u5426\u81ea\u52a8\u5c06\u7cfb\u7edf\u5b57\u6bb5\u5220\u9664. Defaults to True.\u8bf7\u53c2\u8003[drop_sys_cols][omicron.dal.influx.flux.Flux.drop_sys_cols] \"\"\" self . _cols = None self . expressions = defaultdict ( list ) self . _auto_pivot = auto_pivot self . _last_n = None self . no_sys_cols = no_sys_cols def __str__ ( self ): return self . _compose () def __repr__ ( self ) -> str : return f \"< { self . __class__ . __name__ } >: \\n { self . _compose () } \" def _compose ( self ): \"\"\"\u5c06\u6240\u6709\u8868\u8fbe\u5f0f\u5408\u5e76\u4e3a\u4e00\u4e2a\u8868\u8fbe\u5f0f\"\"\" if not all ( [ \"bucket\" in self . expressions , \"measurement\" in self . expressions , \"range\" in self . expressions , ] ): raise AssertionError ( \"bucket, measurement and range must be set\" ) expr = [ self . expressions [ k ] for k in ( \"bucket\" , \"range\" , \"measurement\" )] if self . expressions . get ( \"tags\" ): expr . append ( self . 
expressions [ \"tags\" ]) if self . expressions . get ( \"fields\" ): expr . append ( self . expressions [ \"fields\" ]) if \"drop\" not in self . expressions and self . no_sys_cols : self . drop_sys_cols () if self . expressions . get ( \"drop\" ): expr . append ( self . expressions [ \"drop\" ]) if self . _auto_pivot and \"pivot\" not in self . expressions : self . pivot () if self . expressions . get ( \"pivot\" ): expr . append ( self . expressions [ \"pivot\" ]) if self . expressions . get ( \"group\" ): expr . append ( self . expressions [ \"group\" ]) if self . expressions . get ( \"sort\" ): expr . append ( self . expressions [ \"sort\" ]) if self . expressions . get ( \"limit\" ): expr . append ( self . expressions [ \"limit\" ]) # influxdb\u9ed8\u8ba4\u6309\u5347\u5e8f\u6392\u5217\uff0c\u4f46last_n\u67e5\u8be2\u7684\u7ed3\u679c\u5219\u5fc5\u7136\u662f\u964d\u5e8f\u7684\uff0c\u6240\u4ee5\u8fd8\u9700\u8981\u518d\u6b21\u6392\u5e8f if self . _last_n : expr . append ( \" \\n \" . join ( [ f ' |> top(n: { self . _last_n } , columns: [\"_time\"])' , ' |> sort(columns: [\"_time\"], desc: false)' , ] ) ) return \" \\n \" . join ( expr ) def bucket ( self , bucket : str ) -> \"Flux\" : \"\"\"add bucket to query expression Raises: DuplicateOperationError: \u4e00\u4e2a\u67e5\u8be2\u4e2d\u53ea\u5141\u8bb8\u6307\u5b9a\u4e00\u4e2asource\uff0c\u5982\u679c\u8868\u8fbe\u5f0f\u4e2d\u5df2\u7ecf\u6307\u5b9a\u4e86bucket\uff0c\u5219\u629b\u51fa\u5f02\u5e38 Returns: Flux\u5bf9\u8c61 \"\"\" if \"bucket\" in self . expressions : raise DuplicateOperationError ( \"bucket has been set\" ) self . expressions [ \"bucket\" ] = f 'from(bucket: \" { bucket } \")' return self def measurement ( self , measurement : str ) -> \"Flux\" : \"\"\"add measurement filter to query Raises: DuplicateOperationError: \u4e00\u6b21\u67e5\u8be2\u4e2d\u53ea\u5141\u8bb8\u6307\u5b9a\u4e00\u4e2ameasurement, \u5982\u679c\u8868\u8fbe\u5f0f\u4e2d\u5df2\u7ecf\u5b58\u5728measurement, \u5219\u629b\u51fa\u5f02\u5e38 Returns: Flux\u5bf9\u8c61\u81ea\u8eab\uff0c\u4ee5\u4fbf\u8fdb\u884c\u7ba1\u9053\u64cd\u4f5c \"\"\" if \"measurement\" in self . expressions : raise DuplicateOperationError ( \"measurement has been set\" ) self . 
expressions [ \"measurement\" ] = f ' |> filter(fn: (r) => r[\"_measurement\"] == \" { measurement } \")' return self def range ( self , start : Frame , end : Frame , right_close = True , precision = \"s\" ) -> \"Flux\" : \"\"\"\u6dfb\u52a0\u65f6\u95f4\u8303\u56f4\u8fc7\u6ee4 \u5fc5\u987b\u6307\u5b9a\u7684\u67e5\u8be2\u6761\u4ef6\uff0c\u5426\u5219influxdb\u4f1a\u62a5unbound\u67e5\u8be2\u9519\uff0c\u56e0\u4e3a\u8fd9\u79cd\u60c5\u51b5\u4e0b\uff0c\u8fd4\u56de\u7684\u6570\u636e\u91cf\u5c06\u975e\u5e38\u5927\u3002 \u5728\u683c\u5f0f\u5316\u65f6\u95f4\u65f6\uff0c\u9700\u8981\u6839\u636e`precision`\u751f\u6210\u65f6\u95f4\u5b57\u7b26\u4e32\u3002\u5728\u5411Influxdb\u53d1\u9001\u8bf7\u6c42\u65f6\uff0c\u5e94\u8be5\u6ce8\u610f\u67e5\u8be2\u53c2\u6570\u4e2d\u6307\u5b9a\u7684\u65f6\u95f4\u7cbe\u5ea6\u4e0e\u8fd9\u91cc\u4f7f\u7528\u7684\u4fdd\u6301\u4e00\u81f4\u3002 Influxdb\u7684\u67e5\u8be2\u7ed3\u679c\u9ed8\u8ba4\u4e0d\u5305\u542b\u7ed3\u675f\u65f6\u95f4\uff0c\u5f53`right_close`\u6307\u5b9a\u4e3aTrue\u65f6\uff0c\u6211\u4eec\u5c06\u6839\u636e\u6307\u5b9a\u7684\u7cbe\u5ea6\u4fee\u6539`end`\u65f6\u95f4\uff0c\u4f7f\u4e4b\u4ec5\u6bd4`end`\u591a\u4e00\u4e2a\u65f6\u95f4\u5355\u4f4d\uff0c\u4ece\u800c\u4fdd\u8bc1\u67e5\u8be2\u7ed3\u679c\u4f1a\u5305\u542b`end`\u3002 Raises: DuplicateOperationError: \u4e00\u4e2a\u67e5\u8be2\u4e2d\u53ea\u5141\u8bb8\u6307\u5b9a\u4e00\u6b21\u65f6\u95f4\u8303\u56f4\uff0c\u5982\u679crange\u8868\u8fbe\u5f0f\u5df2\u7ecf\u5b58\u5728\uff0c\u5219\u629b\u51fa\u5f02\u5e38 Args: start: \u5f00\u59cb\u65f6\u95f4 end: \u7ed3\u675f\u65f6\u95f4 right_close: \u67e5\u8be2\u7ed3\u679c\u662f\u5426\u5305\u542b\u7ed3\u675f\u65f6\u95f4\u3002 precision: \u65f6\u95f4\u7cbe\u5ea6\uff0c\u9ed8\u8ba4\u4e3a\u79d2\u3002 Returns: Flux\u5bf9\u8c61\uff0c\u4ee5\u652f\u6301\u7ba1\u9053\u64cd\u4f5c \"\"\" if \"range\" in self . expressions : raise DuplicateOperationError ( \"range has been set\" ) if precision not in [ \"s\" , \"ms\" , \"us\" ]: raise AssertionError ( \"precision must be 's', 'ms' or 'us'\" ) end = self . format_time ( end , precision , right_close ) start = self . format_time ( start , precision ) self . expressions [ \"range\" ] = f \" |> range(start: { start } , stop: { end } )\" return self def limit ( self , limit : int ) -> \"Flux\" : \"\"\"\u6dfb\u52a0\u8fd4\u56de\u8bb0\u5f55\u6570\u9650\u5236 Raises: DuplicateOperationError: \u4e00\u4e2a\u67e5\u8be2\u4e2d\u53ea\u5141\u8bb8\u6307\u5b9a\u4e00\u6b21limit\uff0c\u5982\u679climit\u8868\u8fbe\u5f0f\u5df2\u7ecf\u5b58\u5728\uff0c\u5219\u629b\u51fa\u5f02\u5e38 Args: limit: \u8fd4\u56de\u8bb0\u5f55\u6570\u9650\u5236 Returns: Flux\u5bf9\u8c61\uff0c\u4ee5\u4fbf\u8fdb\u884c\u7ba1\u9053\u64cd\u4f5c \"\"\" if \"limit\" in self . expressions : raise DuplicateOperationError ( \"limit has been set\" ) self . 
expressions [ \"limit\" ] = \" |> limit(n: %d )\" % limit return self @classmethod def to_timestamp ( cls , tm : Frame , precision : str = \"s\" ) -> int : \"\"\"\u5c06\u65f6\u95f4\u6839\u636e\u7cbe\u5ea6\u8f6c\u6362\u4e3aunix\u65f6\u95f4\u6233 \u5728\u5f80influxdb\u5199\u5165\u6570\u636e\u65f6\uff0cline-protocol\u8981\u6c42\u7684\u65f6\u95f4\u6233\u4e3aunix timestamp\uff0c\u5e76\u4e14\u4e0e\u5176\u7cbe\u5ea6\u5bf9\u5e94\u3002 influxdb\u59cb\u7ec8\u4f7f\u7528UTC\u65f6\u95f4\uff0c\u56e0\u6b64\uff0c`tm`\u4e5f\u5fc5\u987b\u5df2\u7ecf\u8f6c\u6362\u6210UTC\u65f6\u95f4\u3002 Args: tm: \u65f6\u95f4 precision: \u65f6\u95f4\u7cbe\u5ea6\uff0c\u9ed8\u8ba4\u4e3a\u79d2\u3002 Returns: \u65f6\u95f4\u6233 \"\"\" if precision not in [ \"s\" , \"ms\" , \"us\" ]: raise AssertionError ( \"precision must be 's', 'ms' or 'us'\" ) # get int repr of tm, in seconds unit if isinstance ( tm , np . datetime64 ): tm = tm . astype ( \"datetime64[s]\" ) . astype ( \"int\" ) elif isinstance ( tm , datetime . datetime ): tm = tm . timestamp () else : tm = arrow . get ( tm ) . timestamp () return int ( tm * 10 ** ({ \"s\" : 0 , \"ms\" : 3 , \"us\" : 6 }[ precision ])) @classmethod def format_time ( cls , tm : Frame , precision : str = \"s\" , shift_forward = False ) -> str : \"\"\"\u5c06\u65f6\u95f4\u8f6c\u6362\u6210\u5ba2\u6237\u7aef\u5bf9\u5e94\u7684\u7cbe\u5ea6\uff0c\u5e76\u4ee5 RFC3339 timestamps\u683c\u5f0f\u4e32\uff08\u5373influxdb\u8981\u6c42\u7684\u683c\u5f0f\uff09\u8fd4\u56de\u3002 \u5982\u679c\u8fd9\u4e2a\u65f6\u95f4\u662f\u4f5c\u4e3a\u67e5\u8be2\u7684range\u4e2d\u7684\u7ed3\u675f\u65f6\u95f4\u4f7f\u7528\u65f6\uff0c\u7531\u4e8einflux\u67e5\u8be2\u7684\u65f6\u95f4\u8303\u56f4\u662f\u5de6\u95ed\u53f3\u5f00\u7684\uff0c\u56e0\u6b64\u5982\u679c\u4f60\u9700\u8981\u67e5\u8be2\u7684\u662f\u4e00\u4e2a\u95ed\u533a\u95f4\uff0c\u5219\u9700\u8981\u5c06`end`\u7684\u65f6\u95f4\u5411\u524d\u504f\u79fb\u4e00\u4e2a\u7cbe\u5ea6\u3002\u901a\u8fc7\u4f20\u5165`shift_forward = True`\u53ef\u4ee5\u5b8c\u6210\u8fd9\u79cd\u8f6c\u6362\u3002 Examples: >>> # by default, the precision is seconds, and convert a date >>> Flux.format_time(datetime.date(2019, 1, 1)) '2019-01-01T00:00:00Z' >>> # set precision to ms, convert a time >>> Flux.format_time(datetime.datetime(1978, 7, 8, 12, 34, 56, 123456), precision=\"ms\") '1978-07-08T12:34:56.123Z' >>> # convert and forward shift >>> Flux.format_time(datetime.date(1978, 7, 8), shift_forward = True) '1978-07-08T00:00:01Z' Args: tm : \u5f85\u683c\u5f0f\u5316\u7684\u65f6\u95f4 precision: \u65f6\u95f4\u7cbe\u5ea6\uff0c\u53ef\u9009\u503c\u4e3a\uff1a's', 'ms', 'us' shift_forward: \u5982\u679c\u4e3aTrue\uff0c\u5219\u5c06end\u5411\u524d\u504f\u79fb\u4e00\u4e2a\u7cbe\u5ea6 Returns: \u8c03\u6574\u540e\u7b26\u5408influx\u65f6\u95f4\u89c4\u8303\u7684\u65f6\u95f4\uff08\u5b57\u7b26\u4e32\u8868\u793a\uff09 \"\"\" timespec = { \"s\" : \"seconds\" , \"ms\" : \"milliseconds\" , \"us\" : \"microseconds\" } . get ( precision ) if timespec is None : raise ValueError ( f \"precision must be one of 's', 'ms', 'us', but got { precision } \" ) tm = arrow . get ( tm ) . naive if shift_forward : tm = tm + datetime . timedelta ( ** { timespec : 1 }) return tm . 
isoformat ( sep = \"T\" , timespec = timespec ) + \"Z\" def tags ( self , tags : DefaultDict [ str , List [ str ]]) -> \"Flux\" : \"\"\"\u7ed9\u67e5\u8be2\u6dfb\u52a0tags\u8fc7\u6ee4\u6761\u4ef6 \u6b64\u67e5\u8be2\u6761\u4ef6\u4e3a\u8fc7\u6ee4\u6761\u4ef6\uff0c\u5e76\u975e\u5fc5\u987b\u3002\u5982\u679c\u67e5\u8be2\u4e2d\u6ca1\u6709\u6307\u5b9atags\uff0c\u5219\u4f1a\u8fd4\u56de\u6240\u6709\u8bb0\u5f55\u3002 \u5728\u5b9e\u73b0\u4e0a\uff0c\u65e2\u53ef\u4ee5\u4f7f\u7528`contains`\u8bed\u6cd5\uff0c\u4e5f\u53ef\u4ee5\u4f7f\u7528`or`\u8bed\u6cd5(\u7531\u4e8e\u4e00\u6761\u8bb0\u5f55\u53ea\u80fd\u5c5e\u4e8e\u4e00\u4e2atag\uff0c\u6240\u4ee5\uff0c\u5f53\u6307\u5b9a\u591a\u4e2atag\u8fdb\u884c\u67e5\u8be2\u65f6\uff0c\u5b83\u4eec\u4e4b\u95f4\u7684\u5173\u7cfb\u5e94\u8be5\u4e3a`or`)\u3002\u7ecf\u9a8c\u8bc1\uff0ccontains\u8bed\u6cd5\u4f1a\u59cb\u7ec8\u5148\u5c06\u6240\u6709\u7b26\u5408\u6761\u4ef6\u7684\u8bb0\u5f55\u68c0\u7d22\u51fa\u6765\uff0c\u518d\u8fdb\u884c\u8fc7\u6ee4\u3002\u8fd9\u6837\u7684\u6548\u7387\u6bd4\u8f83\u4f4e\uff0c\u7279\u522b\u662f\u5f53tags\u7684\u6570\u91cf\u8f83\u5c11\u65f6\uff0c\u4f1a\u8fdc\u8fdc\u6bd4\u4f7f\u7528or\u8bed\u6cd5\u6162\u3002 Raises: DuplicateOperationError: \u4e00\u4e2a\u67e5\u8be2\u4e2d\u53ea\u5141\u8bb8\u6267\u884c\u4e00\u6b21\uff0c\u5982\u679ctag filter\u8868\u8fbe\u5f0f\u5df2\u7ecf\u5b58\u5728\uff0c\u5219\u629b\u51fa\u5f02\u5e38 Args: tags : tags\u662f\u4e00\u4e2a{tagname: Union[str,[tag_values]]}\u5bf9\u8c61\u3002 Examples: >>> flux = Flux() >>> flux.tags({\"code\": [\"000001\", \"000002\"], \"name\": [\"\u6d66\u53d1\u94f6\u884c\"]}).expressions[\"tags\"] ' |> filter(fn: (r) => r[\"code\"] == \"000001\" or r[\"code\"] == \"000002\" or r[\"name\"] == \"\u6d66\u53d1\u94f6\u884c\")' Returns: Flux\u5bf9\u8c61\uff0c\u4ee5\u4fbf\u8fdb\u884c\u7ba1\u9053\u64cd\u4f5c \"\"\" if \"tags\" in self . expressions : raise DuplicateOperationError ( \"tags has been set\" ) filters = [] for tag , values in tags . items (): assert ( isinstance ( values , str ) or len ( values ) > 0 ), f \"tag { tag } should not be empty or None\" if isinstance ( values , str ): values = [ values ] for v in values : filters . append ( f 'r[\" { tag } \"] == \" { v } \"' ) op_expression = \" or \" . join ( filters ) self . expressions [ \"tags\" ] = f \" |> filter(fn: (r) => { op_expression } )\" return self def fields ( self , fields : List , reserve_time_stamp : bool = True ) -> \"Flux\" : \"\"\"\u7ed9\u67e5\u8be2\u6dfb\u52a0field\u8fc7\u6ee4\u6761\u4ef6 \u6b64\u67e5\u8be2\u6761\u4ef6\u4e3a\u8fc7\u6ee4\u6761\u4ef6\uff0c\u7528\u4ee5\u6307\u5b9a\u54ea\u4e9bfield\u4f1a\u51fa\u73b0\u5728\u67e5\u8be2\u7ed3\u679c\u4e2d\uff0c\u5e76\u975e\u5fc5\u987b\u3002\u5982\u679c\u67e5\u8be2\u4e2d\u6ca1\u6709\u6307\u5b9atags\uff0c\u5219\u4f1a\u8fd4\u56de\u6240\u6709\u8bb0\u5f55\u3002 \u7531\u4e8e\u4e00\u6761\u8bb0\u5f55\u53ea\u80fd\u5c5e\u4e8e\u4e00\u4e2a_field\uff0c\u6240\u4ee5\uff0c\u5f53\u6307\u5b9a\u591a\u4e2a_field\u8fdb\u884c\u67e5\u8be2\u65f6\uff0c\u5b83\u4eec\u4e4b\u95f4\u7684\u5173\u7cfb\u5e94\u8be5\u4e3a`or`\u3002 Raises: DuplicateOperationError: \u4e00\u4e2a\u67e5\u8be2\u4e2d\u53ea\u5141\u8bb8\u6267\u884c\u4e00\u6b21\uff0c\u5982\u679cfiled filter\u8868\u8fbe\u5f0f\u5df2\u7ecf\u5b58\u5728\uff0c\u5219\u629b\u51fa\u5f02\u5e38 Args: fields: \u5f85\u67e5\u8be2\u7684field\u5217\u8868 reserve_time_stamp: \u662f\u5426\u4fdd\u7559\u65f6\u95f4\u6233`_time`\uff0c\u9ed8\u8ba4\u4e3aTrue Returns: Flux\u5bf9\u8c61\uff0c\u4ee5\u4fbf\u8fdb\u884c\u7ba1\u9053\u64cd\u4f5c \"\"\" if \"fields\" in self . 
expressions : raise DuplicateOperationError ( \"fields has been set\" ) self . _cols = fields . copy () if reserve_time_stamp and \"_time\" not in self . _cols : self . _cols . append ( \"_time\" ) self . _cols = sorted ( self . _cols ) filters = [ f 'r[\"_field\"] == \" { name } \"' for name in self . _cols ] self . expressions [ \"fields\" ] = f \" |> filter(fn: (r) => { ' or ' . join ( filters ) } )\" return self def pivot ( self , row_keys : List [ str ] = [ \"_time\" ], column_keys = [ \"_field\" ], value_column : str = \"_value\" , ) -> \"Flux\" : \"\"\"pivot\u7528\u6765\u5c06\u4ee5\u5217\u4e3a\u5355\u4f4d\u7684\u6570\u636e\u8f6c\u6362\u4e3a\u4ee5\u884c\u4e3a\u5355\u4f4d\u7684\u6570\u636e Flux\u67e5\u8be2\u8fd4\u56de\u7684\u7ed3\u679c\u901a\u5e38\u90fd\u662f\u4ee5\u5217\u4e3a\u5355\u4f4d\u7684\u6570\u636e\uff0c\u589e\u52a0\u672cpivot\u6761\u4ef6\u540e\uff0c\u7ed3\u679c\u5c06\u88ab\u8f6c\u6362\u6210\u4e3a\u4ee5\u884c\u4e3a\u5355\u4f4d\u7684\u6570\u636e\u518d\u8fd4\u56de\u3002 \u8fd9\u91cc\u5b9e\u73b0\u7684\u662fmeasurement\u5185\u7684\u8f6c\u6362\uff0c\u8bf7\u53c2\u8003 [pivot](https://docs.influxdata.com/flux/v0.x/stdlib/universe/pivot/#align-fields-within-each-measurement-that-have-the-same-timestamp) Args: row_keys: \u60df\u4e00\u786e\u5b9a\u8f93\u51fa\u4e2d\u4e00\u884c\u6570\u636e\u7684\u5217\u540d\u5b57, \u9ed8\u8ba4\u4e3a[\"_time\"] column_keys: \u5217\u540d\u79f0\u5217\u8868\uff0c\u9ed8\u8ba4\u4e3a[\"_field\"] value_column: \u503c\u5217\u540d\uff0c\u9ed8\u8ba4\u4e3a\"_value\" Returns: Flux\u5bf9\u8c61\uff0c\u4ee5\u4fbf\u8fdb\u884c\u7ba1\u9053\u64cd\u4f5c \"\"\" if \"pivot\" in self . expressions : raise DuplicateOperationError ( \"pivot has been set\" ) columns = \",\" . join ([ f '\" { name } \"' for name in column_keys ]) rowkeys = \",\" . join ([ f '\" { name } \"' for name in row_keys ]) self . expressions [ \"pivot\" ] = f ' |> pivot(columnKey: [ { columns } ], rowKey: [ { rowkeys } ], valueColumn: \" { value_column } \")' return self def sort ( self , by : List [ str ] = None , desc : bool = False ) -> \"Flux\" : \"\"\"\u6309\u7167\u6307\u5b9a\u7684\u5217\u8fdb\u884c\u6392\u5e8f \u6839\u636e[influxdb doc](https://docs.influxdata.com/influxdb/v2.0/query-data/flux/first-last/), \u67e5\u8be2\u8fd4\u56de\u503c\u9ed8\u8ba4\u5730\u6309\u65f6\u95f4\u6392\u5e8f\u3002\u56e0\u6b64\uff0c\u5982\u679c\u4ec5\u4ec5\u662f\u8981\u6c42\u67e5\u8be2\u7ed3\u679c\u6309\u65f6\u95f4\u6392\u5e8f\uff0c\u65e0\u987b\u8c03\u7528\u6b64API\uff0c\u4f46\u662f\uff0c\u6b64API\u63d0\u4f9b\u4e86\u6309\u5176\u5b83\u5b57\u6bb5\u6392\u5e8f\u7684\u80fd\u529b\u3002 \u53e6\u5916\uff0c\u5728\u4e00\u4e2a\u67095000\u591a\u4e2atag\uff0c\u5171\u8fd4\u56de1M\u6761\u8bb0\u5f55\u7684\u6d4b\u8bd5\u4e2d\uff0c\u6d4b\u8bd5\u9a8c\u8bc1\u8fd4\u56de\u8bb0\u5f55\u786e\u5b9e\u6309_time\u5347\u5e8f\u6392\u5217\u3002 Args: by: \u6307\u5b9a\u6392\u5e8f\u7684\u5217\u540d\u79f0\u5217\u8868 Returns: Flux\u5bf9\u8c61\uff0c\u4ee5\u4fbf\u8fdb\u884c\u7ba1\u9053\u64cd\u4f5c \"\"\" if \"sort\" in self . expressions : raise DuplicateOperationError ( \"sort has been set\" ) if by is None : by = [ \"_value\" ] if isinstance ( by , str ): by = [ by ] columns_ = \",\" . join ([ f '\" { name } \"' for name in by ]) desc = \"true\" if desc else \"false\" self . expressions [ \"sort\" ] = f \" |> sort(columns: [ { columns_ } ], desc: { desc } )\" return self def group ( self , by : Tuple [ str ]) -> \"Flux\" : \"\"\"[summary] Returns: [description] \"\"\" if \"group\" in self . 
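Note: a sketch of composing a complete query from the builder methods above. The bucket, measurement, tag and field names are placeholders, not values defined by omicron; str(flux) returns the generated Flux script via _compose(), with pivot and drop_sys_cols applied automatically.

```python
import datetime

from omicron.dal.influx.flux import Flux

flux = (
    Flux()
    .bucket("zillionare")                    # placeholder bucket name
    .measurement("stock_bars_1d")            # placeholder measurement
    .range(datetime.date(2022, 1, 4), datetime.date(2022, 1, 28))
    .tags({"code": ["000001.XSHE", "600000.XSHG"]})
    .fields(["open", "close", "volume"])
    .sort(["_time"])
)

# The composed Flux script, ready to send to InfluxDB.
print(flux)
```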
expressions : raise DuplicateOperationError ( \"group has been set\" ) if isinstance ( by , str ): by = [ by ] cols = \",\" . join ([ f '\" { col } \"' for col in by ]) self . expressions [ \"group\" ] = f \" |> group(columns: [ { cols } ])\" return self def latest ( self , n : int ) -> \"Flux\" : \"\"\"\u83b7\u53d6\u6700\u540en\u6761\u6570\u636e\uff0c\u6309\u65f6\u95f4\u589e\u5e8f\u8fd4\u56de Flux\u67e5\u8be2\u7684\u589e\u5f3a\u529f\u80fd\uff0c\u76f8\u5f53\u4e8etop + sort + limit Args: n: \u6700\u540en\u6761\u6570\u636e Returns: Flux\u5bf9\u8c61\uff0c\u4ee5\u4fbf\u8fdb\u884c\u7ba1\u9053\u64cd\u4f5c \"\"\" assert \"top\" not in self . expressions , \"top and last_n can not be used together\" assert ( \"sort\" not in self . expressions ), \"sort and last_n can not be used together\" assert ( \"limit\" not in self . expressions ), \"limit and last_n can not be used together\" self . _last_n = n return self @property def cols ( self ) -> List [ str ]: \"\"\"the columns or the return records the implementation is buggy. Influx doesn't tell us in which order these columns are. Returns: the columns name of the return records \"\"\" # fixme: if keep in expression, then return group key + tag key + value key # if keep not in expression, then stream, table, _time, ... return sorted ( self . _cols ) def delete ( self , measurement : str , stop : datetime . datetime , tags : dict = {}, start : datetime . datetime = None , precision : str = \"s\" , ) -> dict : \"\"\"\u6784\u5efa\u5220\u9664\u8bed\u53e5\u3002 according to [delete-predicate](https://docs.influxdata.com/influxdb/v2.1/reference/syntax/delete-predicate/), delete\u53ea\u652f\u6301AND\u903b\u8f91\u64cd\u4f5c\uff0c\u53ea\u652f\u6301\u201c=\u201d\u64cd\u4f5c\uff0c\u4e0d\u652f\u6301\u201c\uff01=\u201d\u64cd\u4f5c\uff0c\u53ef\u4ee5\u4f7f\u7528\u4efb\u4f55\u5b57\u6bb5\u6216\u8005tag\uff0c\u4f46\u4e0d\u5305\u62ec_time\u548c_value\u5b57\u6bb5\u3002 \u7531\u4e8einfluxdb\u8fd9\u4e00\u6bb5\u6587\u6863\u4e0d\u662f\u5f88\u6e05\u695a\uff0c\u6839\u636e\u8bd5\u9a8c\u7ed3\u679c\uff0c\u76ee\u524d\u4ec5\u652f\u6301\u6309\u65f6\u95f4\u8303\u56f4\u548ctags\u8fdb\u884c\u5220\u9664\u8f83\u597d\u3002\u5982\u679c\u67d0\u4e2acolumn\u7684\u503c\u7c7b\u578b\u662f\u5b57\u7b26\u4e32\uff0c\u5219\u4e5f\u53ef\u4ee5\u901a\u8fc7`tags`\u53c2\u6570\u4f20\u5165\uff0c\u5339\u914d\u540e\u5220\u9664\u3002\u4f46\u5982\u679c\u4f20\u5165\u4e86\u975e\u5b57\u7b26\u4e32\u7c7b\u578b\u7684column\uff0c\u5219\u5c06\u5f97\u5230\u65e0\u6cd5\u9884\u6599\u7684\u7ed3\u679c\u3002 Args: measurement : [description] stop : [description] tags : \u6309tags\u548c\u5339\u914d\u7684\u503c\u8fdb\u884c\u5220\u9664\u3002\u4f20\u5165\u7684tags\u4e2d\uff0ckey\u4e3atag\u540d\u79f0\uff0cvalue\u4e3atag\u8981\u5339\u914d\u7684\u53d6\u503c\uff0c\u53ef\u4ee5\u4e3astr\u6216\u8005List[str]\u3002 start : \u8d77\u59cb\u65f6\u95f4\u3002\u5982\u679c\u7701\u7565\uff0c\u5219\u4f7f\u7528EPOCH_START. precision : \u65f6\u95f4\u7cbe\u5ea6\u3002\u53ef\u4ee5\u4e3a\u201cs\u201d\uff0c\u201cms\u201d\uff0c\u201cus\u201d Returns: \u5220\u9664\u8bed\u53e5 \"\"\" timespec = { \"s\" : \"seconds\" , \"ms\" : \"milliseconds\" , \"us\" : \"microseconds\" } . get ( precision ) if start is None : start = self . EPOCH_START . isoformat ( timespec = timespec ) + \"Z\" predicate = [ f '_measurement=\" { measurement } \"' ] for key , value in tags . items (): if isinstance ( value , list ): predicate . extend ([ f ' { key } = \" { v } \"' for v in value ]) else : predicate . 
append ( f ' { key } = \" { value } \"' ) command = { \"start\" : start , \"stop\" : f \" { stop . isoformat ( timespec = timespec ) } Z\" , \"predicate\" : \" AND \" . join ( predicate ), } return command def drop ( self , cols : List [ str ]) -> \"Flux\" : \"\"\"use this to drop columns before return result Args: cols : the name of columns to be dropped Returns: Flux object, to support pipe operation \"\"\" if \"drop\" in self . expressions : raise DuplicateOperationError ( \"drop operation has been set already\" ) # add surrounding quotes _cols = [ f '\" { c } \"' for c in cols ] self . expressions [ \"drop\" ] = f \" |> drop(columns: [ { ',' . join ( _cols ) } ])\" return self def drop_sys_cols ( self , cols : List [ str ] = None ) -> \"Flux\" : \"\"\"use this to drop [\"_start\", \"_stop\", \"_measurement\"], plus columns specified in `cols`, before return query result please be noticed, after drop sys columns, there's still two sys columns left, which is \"_time\" and \"table\", and \"_time\" should usually be kept, \"table\" is one we're not able to removed. If you don't like _time in return result, you can specify it in `cols` parameter. Args: cols : the extra columns to be dropped Returns: Flux query object \"\"\" _cols = [ \"_start\" , \"_stop\" , \"_measurement\" ] if cols is not None : _cols . extend ( cols ) return self . drop ( _cols ) cols : List [ str ] property readonly \u00b6 the columns or the return records the implementation is buggy. Influx doesn't tell us in which order these columns are. Returns: Type Description List[str] the columns name of the return records __init__ ( self , auto_pivot = True , no_sys_cols = True ) special \u00b6 \u521d\u59cb\u5316Flux\u5bf9\u8c61 Parameters: Name Type Description Default auto_pivot \u662f\u5426\u81ea\u52a8\u5c06\u67e5\u8be2\u5217\u5b57\u6bb5\u7ec4\u88c5\u6210\u884c. Defaults to True. True no_sys_cols \u662f\u5426\u81ea\u52a8\u5c06\u7cfb\u7edf\u5b57\u6bb5\u5220\u9664. Defaults to True.\u8bf7\u53c2\u8003 drop_sys_cols True Source code in omicron/dal/influx/flux.py def __init__ ( self , auto_pivot = True , no_sys_cols = True ): \"\"\"\u521d\u59cb\u5316Flux\u5bf9\u8c61 Args: auto_pivot : \u662f\u5426\u81ea\u52a8\u5c06\u67e5\u8be2\u5217\u5b57\u6bb5\u7ec4\u88c5\u6210\u884c. Defaults to True. no_sys_cols: \u662f\u5426\u81ea\u52a8\u5c06\u7cfb\u7edf\u5b57\u6bb5\u5220\u9664. Defaults to True.\u8bf7\u53c2\u8003[drop_sys_cols][omicron.dal.influx.flux.Flux.drop_sys_cols] \"\"\" self . _cols = None self . expressions = defaultdict ( list ) self . _auto_pivot = auto_pivot self . _last_n = None self . no_sys_cols = no_sys_cols bucket ( self , bucket ) \u00b6 add bucket to query expression Exceptions: Type Description DuplicateOperationError \u4e00\u4e2a\u67e5\u8be2\u4e2d\u53ea\u5141\u8bb8\u6307\u5b9a\u4e00\u4e2asource\uff0c\u5982\u679c\u8868\u8fbe\u5f0f\u4e2d\u5df2\u7ecf\u6307\u5b9a\u4e86bucket\uff0c\u5219\u629b\u51fa\u5f02\u5e38 Returns: Type Description Flux Flux\u5bf9\u8c61 Source code in omicron/dal/influx/flux.py def bucket ( self , bucket : str ) -> \"Flux\" : \"\"\"add bucket to query expression Raises: DuplicateOperationError: \u4e00\u4e2a\u67e5\u8be2\u4e2d\u53ea\u5141\u8bb8\u6307\u5b9a\u4e00\u4e2asource\uff0c\u5982\u679c\u8868\u8fbe\u5f0f\u4e2d\u5df2\u7ecf\u6307\u5b9a\u4e86bucket\uff0c\u5219\u629b\u51fa\u5f02\u5e38 Returns: Flux\u5bf9\u8c61 \"\"\" if \"bucket\" in self . expressions : raise DuplicateOperationError ( \"bucket has been set\" ) self . 
expressions [ \"bucket\" ] = f 'from(bucket: \" { bucket } \")' return self delete ( self , measurement , stop , tags = {}, start = None , precision = 's' ) \u00b6 \u6784\u5efa\u5220\u9664\u8bed\u53e5\u3002 according to delete-predicate , delete\u53ea\u652f\u6301AND\u903b\u8f91\u64cd\u4f5c\uff0c\u53ea\u652f\u6301\u201c=\u201d\u64cd\u4f5c\uff0c\u4e0d\u652f\u6301\u201c\uff01=\u201d\u64cd\u4f5c\uff0c\u53ef\u4ee5\u4f7f\u7528\u4efb\u4f55\u5b57\u6bb5\u6216\u8005tag\uff0c\u4f46\u4e0d\u5305\u62ec_time\u548c_value\u5b57\u6bb5\u3002 \u7531\u4e8einfluxdb\u8fd9\u4e00\u6bb5\u6587\u6863\u4e0d\u662f\u5f88\u6e05\u695a\uff0c\u6839\u636e\u8bd5\u9a8c\u7ed3\u679c\uff0c\u76ee\u524d\u4ec5\u652f\u6301\u6309\u65f6\u95f4\u8303\u56f4\u548ctags\u8fdb\u884c\u5220\u9664\u8f83\u597d\u3002\u5982\u679c\u67d0\u4e2acolumn\u7684\u503c\u7c7b\u578b\u662f\u5b57\u7b26\u4e32\uff0c\u5219\u4e5f\u53ef\u4ee5\u901a\u8fc7 tags \u53c2\u6570\u4f20\u5165\uff0c\u5339\u914d\u540e\u5220\u9664\u3002\u4f46\u5982\u679c\u4f20\u5165\u4e86\u975e\u5b57\u7b26\u4e32\u7c7b\u578b\u7684column\uff0c\u5219\u5c06\u5f97\u5230\u65e0\u6cd5\u9884\u6599\u7684\u7ed3\u679c\u3002 Parameters: Name Type Description Default measurement [description] required stop [description] required tags \u6309tags\u548c\u5339\u914d\u7684\u503c\u8fdb\u884c\u5220\u9664\u3002\u4f20\u5165\u7684tags\u4e2d\uff0ckey\u4e3atag\u540d\u79f0\uff0cvalue\u4e3atag\u8981\u5339\u914d\u7684\u53d6\u503c\uff0c\u53ef\u4ee5\u4e3astr\u6216\u8005List[str]\u3002 {} start \u8d77\u59cb\u65f6\u95f4\u3002\u5982\u679c\u7701\u7565\uff0c\u5219\u4f7f\u7528EPOCH_START. None precision \u65f6\u95f4\u7cbe\u5ea6\u3002\u53ef\u4ee5\u4e3a\u201cs\u201d\uff0c\u201cms\u201d\uff0c\u201cus\u201d 's' Returns: Type Description dict \u5220\u9664\u8bed\u53e5 Source code in omicron/dal/influx/flux.py def delete ( self , measurement : str , stop : datetime . datetime , tags : dict = {}, start : datetime . datetime = None , precision : str = \"s\" , ) -> dict : \"\"\"\u6784\u5efa\u5220\u9664\u8bed\u53e5\u3002 according to [delete-predicate](https://docs.influxdata.com/influxdb/v2.1/reference/syntax/delete-predicate/), delete\u53ea\u652f\u6301AND\u903b\u8f91\u64cd\u4f5c\uff0c\u53ea\u652f\u6301\u201c=\u201d\u64cd\u4f5c\uff0c\u4e0d\u652f\u6301\u201c\uff01=\u201d\u64cd\u4f5c\uff0c\u53ef\u4ee5\u4f7f\u7528\u4efb\u4f55\u5b57\u6bb5\u6216\u8005tag\uff0c\u4f46\u4e0d\u5305\u62ec_time\u548c_value\u5b57\u6bb5\u3002 \u7531\u4e8einfluxdb\u8fd9\u4e00\u6bb5\u6587\u6863\u4e0d\u662f\u5f88\u6e05\u695a\uff0c\u6839\u636e\u8bd5\u9a8c\u7ed3\u679c\uff0c\u76ee\u524d\u4ec5\u652f\u6301\u6309\u65f6\u95f4\u8303\u56f4\u548ctags\u8fdb\u884c\u5220\u9664\u8f83\u597d\u3002\u5982\u679c\u67d0\u4e2acolumn\u7684\u503c\u7c7b\u578b\u662f\u5b57\u7b26\u4e32\uff0c\u5219\u4e5f\u53ef\u4ee5\u901a\u8fc7`tags`\u53c2\u6570\u4f20\u5165\uff0c\u5339\u914d\u540e\u5220\u9664\u3002\u4f46\u5982\u679c\u4f20\u5165\u4e86\u975e\u5b57\u7b26\u4e32\u7c7b\u578b\u7684column\uff0c\u5219\u5c06\u5f97\u5230\u65e0\u6cd5\u9884\u6599\u7684\u7ed3\u679c\u3002 Args: measurement : [description] stop : [description] tags : \u6309tags\u548c\u5339\u914d\u7684\u503c\u8fdb\u884c\u5220\u9664\u3002\u4f20\u5165\u7684tags\u4e2d\uff0ckey\u4e3atag\u540d\u79f0\uff0cvalue\u4e3atag\u8981\u5339\u914d\u7684\u53d6\u503c\uff0c\u53ef\u4ee5\u4e3astr\u6216\u8005List[str]\u3002 start : \u8d77\u59cb\u65f6\u95f4\u3002\u5982\u679c\u7701\u7565\uff0c\u5219\u4f7f\u7528EPOCH_START. 
precision : \u65f6\u95f4\u7cbe\u5ea6\u3002\u53ef\u4ee5\u4e3a\u201cs\u201d\uff0c\u201cms\u201d\uff0c\u201cus\u201d Returns: \u5220\u9664\u8bed\u53e5 \"\"\" timespec = { \"s\" : \"seconds\" , \"ms\" : \"milliseconds\" , \"us\" : \"microseconds\" } . get ( precision ) if start is None : start = self . EPOCH_START . isoformat ( timespec = timespec ) + \"Z\" predicate = [ f '_measurement=\" { measurement } \"' ] for key , value in tags . items (): if isinstance ( value , list ): predicate . extend ([ f ' { key } = \" { v } \"' for v in value ]) else : predicate . append ( f ' { key } = \" { value } \"' ) command = { \"start\" : start , \"stop\" : f \" { stop . isoformat ( timespec = timespec ) } Z\" , \"predicate\" : \" AND \" . join ( predicate ), } return command drop ( self , cols ) \u00b6 use this to drop columns before return result Parameters: Name Type Description Default cols the name of columns to be dropped required Returns: Type Description Flux Flux object, to support pipe operation Source code in omicron/dal/influx/flux.py def drop ( self , cols : List [ str ]) -> \"Flux\" : \"\"\"use this to drop columns before return result Args: cols : the name of columns to be dropped Returns: Flux object, to support pipe operation \"\"\" if \"drop\" in self . expressions : raise DuplicateOperationError ( \"drop operation has been set already\" ) # add surrounding quotes _cols = [ f '\" { c } \"' for c in cols ] self . expressions [ \"drop\" ] = f \" |> drop(columns: [ { ',' . join ( _cols ) } ])\" return self drop_sys_cols ( self , cols = None ) \u00b6 use this to drop [\"_start\", \"_stop\", \"_measurement\"], plus columns specified in cols , before return query result please be noticed, after drop sys columns, there's still two sys columns left, which is \"_time\" and \"table\", and \"_time\" should usually be kept, \"table\" is one we're not able to removed. If you don't like _time in return result, you can specify it in cols parameter. Parameters: Name Type Description Default cols the extra columns to be dropped None Returns: Type Description Flux Flux query object Source code in omicron/dal/influx/flux.py def drop_sys_cols ( self , cols : List [ str ] = None ) -> \"Flux\" : \"\"\"use this to drop [\"_start\", \"_stop\", \"_measurement\"], plus columns specified in `cols`, before return query result please be noticed, after drop sys columns, there's still two sys columns left, which is \"_time\" and \"table\", and \"_time\" should usually be kept, \"table\" is one we're not able to removed. If you don't like _time in return result, you can specify it in `cols` parameter. Args: cols : the extra columns to be dropped Returns: Flux query object \"\"\" _cols = [ \"_start\" , \"_stop\" , \"_measurement\" ] if cols is not None : _cols . extend ( cols ) return self . 
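Note: a sketch of building a delete command under the constraints just described (AND-only predicates, "=" matches, time range plus tags). The measurement and tag values are placeholders; the returned dict maps onto InfluxDB 2.x's delete API parameters (start, stop, predicate).

```python
import datetime

from omicron.dal.influx.flux import Flux

cmd = Flux().delete(
    "stock_bars_1d",                               # placeholder measurement
    stop=datetime.datetime(2022, 1, 28, 15, 0),
    tags={"code": ["000001.XSHE", "000002.XSHE"]},
)

print(cmd["start"])      # '1970-01-01T00:00:00Z' (EPOCH_START, since start was omitted)
print(cmd["stop"])       # '2022-01-28T15:00:00Z'
print(cmd["predicate"])  # the _measurement filter plus the two code tags, joined with " AND "
```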
drop ( _cols ) fields ( self , fields , reserve_time_stamp = True ) \u00b6 \u7ed9\u67e5\u8be2\u6dfb\u52a0field\u8fc7\u6ee4\u6761\u4ef6 \u6b64\u67e5\u8be2\u6761\u4ef6\u4e3a\u8fc7\u6ee4\u6761\u4ef6\uff0c\u7528\u4ee5\u6307\u5b9a\u54ea\u4e9bfield\u4f1a\u51fa\u73b0\u5728\u67e5\u8be2\u7ed3\u679c\u4e2d\uff0c\u5e76\u975e\u5fc5\u987b\u3002\u5982\u679c\u67e5\u8be2\u4e2d\u6ca1\u6709\u6307\u5b9atags\uff0c\u5219\u4f1a\u8fd4\u56de\u6240\u6709\u8bb0\u5f55\u3002 \u7531\u4e8e\u4e00\u6761\u8bb0\u5f55\u53ea\u80fd\u5c5e\u4e8e\u4e00\u4e2a_field\uff0c\u6240\u4ee5\uff0c\u5f53\u6307\u5b9a\u591a\u4e2a_field\u8fdb\u884c\u67e5\u8be2\u65f6\uff0c\u5b83\u4eec\u4e4b\u95f4\u7684\u5173\u7cfb\u5e94\u8be5\u4e3a or \u3002 Exceptions: Type Description DuplicateOperationError \u4e00\u4e2a\u67e5\u8be2\u4e2d\u53ea\u5141\u8bb8\u6267\u884c\u4e00\u6b21\uff0c\u5982\u679cfiled filter\u8868\u8fbe\u5f0f\u5df2\u7ecf\u5b58\u5728\uff0c\u5219\u629b\u51fa\u5f02\u5e38 Parameters: Name Type Description Default fields List \u5f85\u67e5\u8be2\u7684field\u5217\u8868 required reserve_time_stamp bool \u662f\u5426\u4fdd\u7559\u65f6\u95f4\u6233 _time \uff0c\u9ed8\u8ba4\u4e3aTrue True Returns: Type Description Flux Flux\u5bf9\u8c61\uff0c\u4ee5\u4fbf\u8fdb\u884c\u7ba1\u9053\u64cd\u4f5c Source code in omicron/dal/influx/flux.py def fields ( self , fields : List , reserve_time_stamp : bool = True ) -> \"Flux\" : \"\"\"\u7ed9\u67e5\u8be2\u6dfb\u52a0field\u8fc7\u6ee4\u6761\u4ef6 \u6b64\u67e5\u8be2\u6761\u4ef6\u4e3a\u8fc7\u6ee4\u6761\u4ef6\uff0c\u7528\u4ee5\u6307\u5b9a\u54ea\u4e9bfield\u4f1a\u51fa\u73b0\u5728\u67e5\u8be2\u7ed3\u679c\u4e2d\uff0c\u5e76\u975e\u5fc5\u987b\u3002\u5982\u679c\u67e5\u8be2\u4e2d\u6ca1\u6709\u6307\u5b9atags\uff0c\u5219\u4f1a\u8fd4\u56de\u6240\u6709\u8bb0\u5f55\u3002 \u7531\u4e8e\u4e00\u6761\u8bb0\u5f55\u53ea\u80fd\u5c5e\u4e8e\u4e00\u4e2a_field\uff0c\u6240\u4ee5\uff0c\u5f53\u6307\u5b9a\u591a\u4e2a_field\u8fdb\u884c\u67e5\u8be2\u65f6\uff0c\u5b83\u4eec\u4e4b\u95f4\u7684\u5173\u7cfb\u5e94\u8be5\u4e3a`or`\u3002 Raises: DuplicateOperationError: \u4e00\u4e2a\u67e5\u8be2\u4e2d\u53ea\u5141\u8bb8\u6267\u884c\u4e00\u6b21\uff0c\u5982\u679cfiled filter\u8868\u8fbe\u5f0f\u5df2\u7ecf\u5b58\u5728\uff0c\u5219\u629b\u51fa\u5f02\u5e38 Args: fields: \u5f85\u67e5\u8be2\u7684field\u5217\u8868 reserve_time_stamp: \u662f\u5426\u4fdd\u7559\u65f6\u95f4\u6233`_time`\uff0c\u9ed8\u8ba4\u4e3aTrue Returns: Flux\u5bf9\u8c61\uff0c\u4ee5\u4fbf\u8fdb\u884c\u7ba1\u9053\u64cd\u4f5c \"\"\" if \"fields\" in self . expressions : raise DuplicateOperationError ( \"fields has been set\" ) self . _cols = fields . copy () if reserve_time_stamp and \"_time\" not in self . _cols : self . _cols . append ( \"_time\" ) self . _cols = sorted ( self . _cols ) filters = [ f 'r[\"_field\"] == \" { name } \"' for name in self . _cols ] self . expressions [ \"fields\" ] = f \" |> filter(fn: (r) => { ' or ' . 
join ( filters ) } )\" return self format_time ( tm , precision = 's' , shift_forward = False ) classmethod \u00b6 \u5c06\u65f6\u95f4\u8f6c\u6362\u6210\u5ba2\u6237\u7aef\u5bf9\u5e94\u7684\u7cbe\u5ea6\uff0c\u5e76\u4ee5 RFC3339 timestamps\u683c\u5f0f\u4e32\uff08\u5373influxdb\u8981\u6c42\u7684\u683c\u5f0f\uff09\u8fd4\u56de\u3002 \u5982\u679c\u8fd9\u4e2a\u65f6\u95f4\u662f\u4f5c\u4e3a\u67e5\u8be2\u7684range\u4e2d\u7684\u7ed3\u675f\u65f6\u95f4\u4f7f\u7528\u65f6\uff0c\u7531\u4e8einflux\u67e5\u8be2\u7684\u65f6\u95f4\u8303\u56f4\u662f\u5de6\u95ed\u53f3\u5f00\u7684\uff0c\u56e0\u6b64\u5982\u679c\u4f60\u9700\u8981\u67e5\u8be2\u7684\u662f\u4e00\u4e2a\u95ed\u533a\u95f4\uff0c\u5219\u9700\u8981\u5c06 end \u7684\u65f6\u95f4\u5411\u524d\u504f\u79fb\u4e00\u4e2a\u7cbe\u5ea6\u3002\u901a\u8fc7\u4f20\u5165 shift_forward = True \u53ef\u4ee5\u5b8c\u6210\u8fd9\u79cd\u8f6c\u6362\u3002 Examples: >>> # by default, the precision is seconds, and convert a date >>> Flux . format_time ( datetime . date ( 2019 , 1 , 1 )) '2019-01-01T00:00:00Z' >>> # set precision to ms, convert a time >>> Flux . format_time ( datetime . datetime ( 1978 , 7 , 8 , 12 , 34 , 56 , 123456 ), precision = \"ms\" ) '1978-07-08T12:34:56.123Z' >>> # convert and forward shift >>> Flux . format_time ( datetime . date ( 1978 , 7 , 8 ), shift_forward = True ) '1978-07-08T00:00:01Z' Parameters: Name Type Description Default tm \u5f85\u683c\u5f0f\u5316\u7684\u65f6\u95f4 required precision str \u65f6\u95f4\u7cbe\u5ea6\uff0c\u53ef\u9009\u503c\u4e3a\uff1a's', 'ms', 'us' 's' shift_forward \u5982\u679c\u4e3aTrue\uff0c\u5219\u5c06end\u5411\u524d\u504f\u79fb\u4e00\u4e2a\u7cbe\u5ea6 False Returns: Type Description str \u8c03\u6574\u540e\u7b26\u5408influx\u65f6\u95f4\u89c4\u8303\u7684\u65f6\u95f4\uff08\u5b57\u7b26\u4e32\u8868\u793a\uff09 Source code in omicron/dal/influx/flux.py @classmethod def format_time ( cls , tm : Frame , precision : str = \"s\" , shift_forward = False ) -> str : \"\"\"\u5c06\u65f6\u95f4\u8f6c\u6362\u6210\u5ba2\u6237\u7aef\u5bf9\u5e94\u7684\u7cbe\u5ea6\uff0c\u5e76\u4ee5 RFC3339 timestamps\u683c\u5f0f\u4e32\uff08\u5373influxdb\u8981\u6c42\u7684\u683c\u5f0f\uff09\u8fd4\u56de\u3002 \u5982\u679c\u8fd9\u4e2a\u65f6\u95f4\u662f\u4f5c\u4e3a\u67e5\u8be2\u7684range\u4e2d\u7684\u7ed3\u675f\u65f6\u95f4\u4f7f\u7528\u65f6\uff0c\u7531\u4e8einflux\u67e5\u8be2\u7684\u65f6\u95f4\u8303\u56f4\u662f\u5de6\u95ed\u53f3\u5f00\u7684\uff0c\u56e0\u6b64\u5982\u679c\u4f60\u9700\u8981\u67e5\u8be2\u7684\u662f\u4e00\u4e2a\u95ed\u533a\u95f4\uff0c\u5219\u9700\u8981\u5c06`end`\u7684\u65f6\u95f4\u5411\u524d\u504f\u79fb\u4e00\u4e2a\u7cbe\u5ea6\u3002\u901a\u8fc7\u4f20\u5165`shift_forward = True`\u53ef\u4ee5\u5b8c\u6210\u8fd9\u79cd\u8f6c\u6362\u3002 Examples: >>> # by default, the precision is seconds, and convert a date >>> Flux.format_time(datetime.date(2019, 1, 1)) '2019-01-01T00:00:00Z' >>> # set precision to ms, convert a time >>> Flux.format_time(datetime.datetime(1978, 7, 8, 12, 34, 56, 123456), precision=\"ms\") '1978-07-08T12:34:56.123Z' >>> # convert and forward shift >>> Flux.format_time(datetime.date(1978, 7, 8), shift_forward = True) '1978-07-08T00:00:01Z' Args: tm : \u5f85\u683c\u5f0f\u5316\u7684\u65f6\u95f4 precision: \u65f6\u95f4\u7cbe\u5ea6\uff0c\u53ef\u9009\u503c\u4e3a\uff1a's', 'ms', 'us' shift_forward: \u5982\u679c\u4e3aTrue\uff0c\u5219\u5c06end\u5411\u524d\u504f\u79fb\u4e00\u4e2a\u7cbe\u5ea6 Returns: \u8c03\u6574\u540e\u7b26\u5408influx\u65f6\u95f4\u89c4\u8303\u7684\u65f6\u95f4\uff08\u5b57\u7b26\u4e32\u8868\u793a\uff09 \"\"\" timespec = { \"s\" : \"seconds\" 
, \"ms\" : \"milliseconds\" , \"us\" : \"microseconds\" } . get ( precision ) if timespec is None : raise ValueError ( f \"precision must be one of 's', 'ms', 'us', but got { precision } \" ) tm = arrow . get ( tm ) . naive if shift_forward : tm = tm + datetime . timedelta ( ** { timespec : 1 }) return tm . isoformat ( sep = \"T\" , timespec = timespec ) + \"Z\" group ( self , by ) \u00b6 [summary] Returns: Type Description Flux [description] Source code in omicron/dal/influx/flux.py def group ( self , by : Tuple [ str ]) -> \"Flux\" : \"\"\"[summary] Returns: [description] \"\"\" if \"group\" in self . expressions : raise DuplicateOperationError ( \"group has been set\" ) if isinstance ( by , str ): by = [ by ] cols = \",\" . join ([ f '\" { col } \"' for col in by ]) self . expressions [ \"group\" ] = f \" |> group(columns: [ { cols } ])\" return self latest ( self , n ) \u00b6 \u83b7\u53d6\u6700\u540en\u6761\u6570\u636e\uff0c\u6309\u65f6\u95f4\u589e\u5e8f\u8fd4\u56de Flux\u67e5\u8be2\u7684\u589e\u5f3a\u529f\u80fd\uff0c\u76f8\u5f53\u4e8etop + sort + limit Parameters: Name Type Description Default n int \u6700\u540en\u6761\u6570\u636e required Returns: Type Description Flux Flux\u5bf9\u8c61\uff0c\u4ee5\u4fbf\u8fdb\u884c\u7ba1\u9053\u64cd\u4f5c Source code in omicron/dal/influx/flux.py def latest ( self , n : int ) -> \"Flux\" : \"\"\"\u83b7\u53d6\u6700\u540en\u6761\u6570\u636e\uff0c\u6309\u65f6\u95f4\u589e\u5e8f\u8fd4\u56de Flux\u67e5\u8be2\u7684\u589e\u5f3a\u529f\u80fd\uff0c\u76f8\u5f53\u4e8etop + sort + limit Args: n: \u6700\u540en\u6761\u6570\u636e Returns: Flux\u5bf9\u8c61\uff0c\u4ee5\u4fbf\u8fdb\u884c\u7ba1\u9053\u64cd\u4f5c \"\"\" assert \"top\" not in self . expressions , \"top and last_n can not be used together\" assert ( \"sort\" not in self . expressions ), \"sort and last_n can not be used together\" assert ( \"limit\" not in self . expressions ), \"limit and last_n can not be used together\" self . _last_n = n return self limit ( self , limit ) \u00b6 \u6dfb\u52a0\u8fd4\u56de\u8bb0\u5f55\u6570\u9650\u5236 Exceptions: Type Description DuplicateOperationError \u4e00\u4e2a\u67e5\u8be2\u4e2d\u53ea\u5141\u8bb8\u6307\u5b9a\u4e00\u6b21limit\uff0c\u5982\u679climit\u8868\u8fbe\u5f0f\u5df2\u7ecf\u5b58\u5728\uff0c\u5219\u629b\u51fa\u5f02\u5e38 Parameters: Name Type Description Default limit int \u8fd4\u56de\u8bb0\u5f55\u6570\u9650\u5236 required Returns: Type Description Flux Flux\u5bf9\u8c61\uff0c\u4ee5\u4fbf\u8fdb\u884c\u7ba1\u9053\u64cd\u4f5c Source code in omicron/dal/influx/flux.py def limit ( self , limit : int ) -> \"Flux\" : \"\"\"\u6dfb\u52a0\u8fd4\u56de\u8bb0\u5f55\u6570\u9650\u5236 Raises: DuplicateOperationError: \u4e00\u4e2a\u67e5\u8be2\u4e2d\u53ea\u5141\u8bb8\u6307\u5b9a\u4e00\u6b21limit\uff0c\u5982\u679climit\u8868\u8fbe\u5f0f\u5df2\u7ecf\u5b58\u5728\uff0c\u5219\u629b\u51fa\u5f02\u5e38 Args: limit: \u8fd4\u56de\u8bb0\u5f55\u6570\u9650\u5236 Returns: Flux\u5bf9\u8c61\uff0c\u4ee5\u4fbf\u8fdb\u884c\u7ba1\u9053\u64cd\u4f5c \"\"\" if \"limit\" in self . expressions : raise DuplicateOperationError ( \"limit has been set\" ) self . 
expressions [ \"limit\" ] = \" |> limit(n: %d )\" % limit return self measurement ( self , measurement ) \u00b6 add measurement filter to query Exceptions: Type Description DuplicateOperationError \u4e00\u6b21\u67e5\u8be2\u4e2d\u53ea\u5141\u8bb8\u6307\u5b9a\u4e00\u4e2ameasurement, \u5982\u679c\u8868\u8fbe\u5f0f\u4e2d\u5df2\u7ecf\u5b58\u5728measurement, \u5219\u629b\u51fa\u5f02\u5e38 Returns: Type Description Flux Flux\u5bf9\u8c61\u81ea\u8eab\uff0c\u4ee5\u4fbf\u8fdb\u884c\u7ba1\u9053\u64cd\u4f5c Source code in omicron/dal/influx/flux.py def measurement ( self , measurement : str ) -> \"Flux\" : \"\"\"add measurement filter to query Raises: DuplicateOperationError: \u4e00\u6b21\u67e5\u8be2\u4e2d\u53ea\u5141\u8bb8\u6307\u5b9a\u4e00\u4e2ameasurement, \u5982\u679c\u8868\u8fbe\u5f0f\u4e2d\u5df2\u7ecf\u5b58\u5728measurement, \u5219\u629b\u51fa\u5f02\u5e38 Returns: Flux\u5bf9\u8c61\u81ea\u8eab\uff0c\u4ee5\u4fbf\u8fdb\u884c\u7ba1\u9053\u64cd\u4f5c \"\"\" if \"measurement\" in self . expressions : raise DuplicateOperationError ( \"measurement has been set\" ) self . expressions [ \"measurement\" ] = f ' |> filter(fn: (r) => r[\"_measurement\"] == \" { measurement } \")' return self pivot ( self , row_keys = [ '_time' ], column_keys = [ '_field' ], value_column = '_value' ) \u00b6 pivot\u7528\u6765\u5c06\u4ee5\u5217\u4e3a\u5355\u4f4d\u7684\u6570\u636e\u8f6c\u6362\u4e3a\u4ee5\u884c\u4e3a\u5355\u4f4d\u7684\u6570\u636e Flux\u67e5\u8be2\u8fd4\u56de\u7684\u7ed3\u679c\u901a\u5e38\u90fd\u662f\u4ee5\u5217\u4e3a\u5355\u4f4d\u7684\u6570\u636e\uff0c\u589e\u52a0\u672cpivot\u6761\u4ef6\u540e\uff0c\u7ed3\u679c\u5c06\u88ab\u8f6c\u6362\u6210\u4e3a\u4ee5\u884c\u4e3a\u5355\u4f4d\u7684\u6570\u636e\u518d\u8fd4\u56de\u3002 \u8fd9\u91cc\u5b9e\u73b0\u7684\u662fmeasurement\u5185\u7684\u8f6c\u6362\uff0c\u8bf7\u53c2\u8003 pivot Parameters: Name Type Description Default row_keys List[str] \u60df\u4e00\u786e\u5b9a\u8f93\u51fa\u4e2d\u4e00\u884c\u6570\u636e\u7684\u5217\u540d\u5b57, \u9ed8\u8ba4\u4e3a[\"_time\"] ['_time'] column_keys \u5217\u540d\u79f0\u5217\u8868\uff0c\u9ed8\u8ba4\u4e3a[\"_field\"] ['_field'] value_column str \u503c\u5217\u540d\uff0c\u9ed8\u8ba4\u4e3a\"_value\" '_value' Returns: Type Description Flux Flux\u5bf9\u8c61\uff0c\u4ee5\u4fbf\u8fdb\u884c\u7ba1\u9053\u64cd\u4f5c Source code in omicron/dal/influx/flux.py def pivot ( self , row_keys : List [ str ] = [ \"_time\" ], column_keys = [ \"_field\" ], value_column : str = \"_value\" , ) -> \"Flux\" : \"\"\"pivot\u7528\u6765\u5c06\u4ee5\u5217\u4e3a\u5355\u4f4d\u7684\u6570\u636e\u8f6c\u6362\u4e3a\u4ee5\u884c\u4e3a\u5355\u4f4d\u7684\u6570\u636e Flux\u67e5\u8be2\u8fd4\u56de\u7684\u7ed3\u679c\u901a\u5e38\u90fd\u662f\u4ee5\u5217\u4e3a\u5355\u4f4d\u7684\u6570\u636e\uff0c\u589e\u52a0\u672cpivot\u6761\u4ef6\u540e\uff0c\u7ed3\u679c\u5c06\u88ab\u8f6c\u6362\u6210\u4e3a\u4ee5\u884c\u4e3a\u5355\u4f4d\u7684\u6570\u636e\u518d\u8fd4\u56de\u3002 \u8fd9\u91cc\u5b9e\u73b0\u7684\u662fmeasurement\u5185\u7684\u8f6c\u6362\uff0c\u8bf7\u53c2\u8003 [pivot](https://docs.influxdata.com/flux/v0.x/stdlib/universe/pivot/#align-fields-within-each-measurement-that-have-the-same-timestamp) Args: row_keys: \u60df\u4e00\u786e\u5b9a\u8f93\u51fa\u4e2d\u4e00\u884c\u6570\u636e\u7684\u5217\u540d\u5b57, \u9ed8\u8ba4\u4e3a[\"_time\"] column_keys: \u5217\u540d\u79f0\u5217\u8868\uff0c\u9ed8\u8ba4\u4e3a[\"_field\"] value_column: \u503c\u5217\u540d\uff0c\u9ed8\u8ba4\u4e3a\"_value\" Returns: Flux\u5bf9\u8c61\uff0c\u4ee5\u4fbf\u8fdb\u884c\u7ba1\u9053\u64cd\u4f5c \"\"\" if \"pivot\" in self . 
expressions : raise DuplicateOperationError ( \"pivot has been set\" ) columns = \",\" . join ([ f '\" { name } \"' for name in column_keys ]) rowkeys = \",\" . join ([ f '\" { name } \"' for name in row_keys ]) self . expressions [ \"pivot\" ] = f ' |> pivot(columnKey: [ { columns } ], rowKey: [ { rowkeys } ], valueColumn: \" { value_column } \")' return self range ( self , start , end , right_close = True , precision = 's' ) \u00b6 \u6dfb\u52a0\u65f6\u95f4\u8303\u56f4\u8fc7\u6ee4 \u5fc5\u987b\u6307\u5b9a\u7684\u67e5\u8be2\u6761\u4ef6\uff0c\u5426\u5219influxdb\u4f1a\u62a5unbound\u67e5\u8be2\u9519\uff0c\u56e0\u4e3a\u8fd9\u79cd\u60c5\u51b5\u4e0b\uff0c\u8fd4\u56de\u7684\u6570\u636e\u91cf\u5c06\u975e\u5e38\u5927\u3002 \u5728\u683c\u5f0f\u5316\u65f6\u95f4\u65f6\uff0c\u9700\u8981\u6839\u636e precision \u751f\u6210\u65f6\u95f4\u5b57\u7b26\u4e32\u3002\u5728\u5411Influxdb\u53d1\u9001\u8bf7\u6c42\u65f6\uff0c\u5e94\u8be5\u6ce8\u610f\u67e5\u8be2\u53c2\u6570\u4e2d\u6307\u5b9a\u7684\u65f6\u95f4\u7cbe\u5ea6\u4e0e\u8fd9\u91cc\u4f7f\u7528\u7684\u4fdd\u6301\u4e00\u81f4\u3002 Influxdb\u7684\u67e5\u8be2\u7ed3\u679c\u9ed8\u8ba4\u4e0d\u5305\u542b\u7ed3\u675f\u65f6\u95f4\uff0c\u5f53 right_close \u6307\u5b9a\u4e3aTrue\u65f6\uff0c\u6211\u4eec\u5c06\u6839\u636e\u6307\u5b9a\u7684\u7cbe\u5ea6\u4fee\u6539 end \u65f6\u95f4\uff0c\u4f7f\u4e4b\u4ec5\u6bd4 end \u591a\u4e00\u4e2a\u65f6\u95f4\u5355\u4f4d\uff0c\u4ece\u800c\u4fdd\u8bc1\u67e5\u8be2\u7ed3\u679c\u4f1a\u5305\u542b end \u3002 Exceptions: Type Description DuplicateOperationError \u4e00\u4e2a\u67e5\u8be2\u4e2d\u53ea\u5141\u8bb8\u6307\u5b9a\u4e00\u6b21\u65f6\u95f4\u8303\u56f4\uff0c\u5982\u679crange\u8868\u8fbe\u5f0f\u5df2\u7ecf\u5b58\u5728\uff0c\u5219\u629b\u51fa\u5f02\u5e38 Parameters: Name Type Description Default start Union[datetime.date, datetime.datetime] \u5f00\u59cb\u65f6\u95f4 required end Union[datetime.date, datetime.datetime] \u7ed3\u675f\u65f6\u95f4 required right_close \u67e5\u8be2\u7ed3\u679c\u662f\u5426\u5305\u542b\u7ed3\u675f\u65f6\u95f4\u3002 True precision \u65f6\u95f4\u7cbe\u5ea6\uff0c\u9ed8\u8ba4\u4e3a\u79d2\u3002 's' Returns: Type Description Flux Flux\u5bf9\u8c61\uff0c\u4ee5\u652f\u6301\u7ba1\u9053\u64cd\u4f5c Source code in omicron/dal/influx/flux.py def range ( self , start : Frame , end : Frame , right_close = True , precision = \"s\" ) -> \"Flux\" : \"\"\"\u6dfb\u52a0\u65f6\u95f4\u8303\u56f4\u8fc7\u6ee4 \u5fc5\u987b\u6307\u5b9a\u7684\u67e5\u8be2\u6761\u4ef6\uff0c\u5426\u5219influxdb\u4f1a\u62a5unbound\u67e5\u8be2\u9519\uff0c\u56e0\u4e3a\u8fd9\u79cd\u60c5\u51b5\u4e0b\uff0c\u8fd4\u56de\u7684\u6570\u636e\u91cf\u5c06\u975e\u5e38\u5927\u3002 \u5728\u683c\u5f0f\u5316\u65f6\u95f4\u65f6\uff0c\u9700\u8981\u6839\u636e`precision`\u751f\u6210\u65f6\u95f4\u5b57\u7b26\u4e32\u3002\u5728\u5411Influxdb\u53d1\u9001\u8bf7\u6c42\u65f6\uff0c\u5e94\u8be5\u6ce8\u610f\u67e5\u8be2\u53c2\u6570\u4e2d\u6307\u5b9a\u7684\u65f6\u95f4\u7cbe\u5ea6\u4e0e\u8fd9\u91cc\u4f7f\u7528\u7684\u4fdd\u6301\u4e00\u81f4\u3002 Influxdb\u7684\u67e5\u8be2\u7ed3\u679c\u9ed8\u8ba4\u4e0d\u5305\u542b\u7ed3\u675f\u65f6\u95f4\uff0c\u5f53`right_close`\u6307\u5b9a\u4e3aTrue\u65f6\uff0c\u6211\u4eec\u5c06\u6839\u636e\u6307\u5b9a\u7684\u7cbe\u5ea6\u4fee\u6539`end`\u65f6\u95f4\uff0c\u4f7f\u4e4b\u4ec5\u6bd4`end`\u591a\u4e00\u4e2a\u65f6\u95f4\u5355\u4f4d\uff0c\u4ece\u800c\u4fdd\u8bc1\u67e5\u8be2\u7ed3\u679c\u4f1a\u5305\u542b`end`\u3002 Raises: DuplicateOperationError: 
\u4e00\u4e2a\u67e5\u8be2\u4e2d\u53ea\u5141\u8bb8\u6307\u5b9a\u4e00\u6b21\u65f6\u95f4\u8303\u56f4\uff0c\u5982\u679crange\u8868\u8fbe\u5f0f\u5df2\u7ecf\u5b58\u5728\uff0c\u5219\u629b\u51fa\u5f02\u5e38 Args: start: \u5f00\u59cb\u65f6\u95f4 end: \u7ed3\u675f\u65f6\u95f4 right_close: \u67e5\u8be2\u7ed3\u679c\u662f\u5426\u5305\u542b\u7ed3\u675f\u65f6\u95f4\u3002 precision: \u65f6\u95f4\u7cbe\u5ea6\uff0c\u9ed8\u8ba4\u4e3a\u79d2\u3002 Returns: Flux\u5bf9\u8c61\uff0c\u4ee5\u652f\u6301\u7ba1\u9053\u64cd\u4f5c \"\"\" if \"range\" in self . expressions : raise DuplicateOperationError ( \"range has been set\" ) if precision not in [ \"s\" , \"ms\" , \"us\" ]: raise AssertionError ( \"precision must be 's', 'ms' or 'us'\" ) end = self . format_time ( end , precision , right_close ) start = self . format_time ( start , precision ) self . expressions [ \"range\" ] = f \" |> range(start: { start } , stop: { end } )\" return self sort ( self , by = None , desc = False ) \u00b6 \u6309\u7167\u6307\u5b9a\u7684\u5217\u8fdb\u884c\u6392\u5e8f \u6839\u636e influxdb doc , \u67e5\u8be2\u8fd4\u56de\u503c\u9ed8\u8ba4\u5730\u6309\u65f6\u95f4\u6392\u5e8f\u3002\u56e0\u6b64\uff0c\u5982\u679c\u4ec5\u4ec5\u662f\u8981\u6c42\u67e5\u8be2\u7ed3\u679c\u6309\u65f6\u95f4\u6392\u5e8f\uff0c\u65e0\u987b\u8c03\u7528\u6b64API\uff0c\u4f46\u662f\uff0c\u6b64API\u63d0\u4f9b\u4e86\u6309\u5176\u5b83\u5b57\u6bb5\u6392\u5e8f\u7684\u80fd\u529b\u3002 \u53e6\u5916\uff0c\u5728\u4e00\u4e2a\u67095000\u591a\u4e2atag\uff0c\u5171\u8fd4\u56de1M\u6761\u8bb0\u5f55\u7684\u6d4b\u8bd5\u4e2d\uff0c\u6d4b\u8bd5\u9a8c\u8bc1\u8fd4\u56de\u8bb0\u5f55\u786e\u5b9e\u6309_time\u5347\u5e8f\u6392\u5217\u3002 Parameters: Name Type Description Default by List[str] \u6307\u5b9a\u6392\u5e8f\u7684\u5217\u540d\u79f0\u5217\u8868 None Returns: Type Description Flux Flux\u5bf9\u8c61\uff0c\u4ee5\u4fbf\u8fdb\u884c\u7ba1\u9053\u64cd\u4f5c Source code in omicron/dal/influx/flux.py def sort ( self , by : List [ str ] = None , desc : bool = False ) -> \"Flux\" : \"\"\"\u6309\u7167\u6307\u5b9a\u7684\u5217\u8fdb\u884c\u6392\u5e8f \u6839\u636e[influxdb doc](https://docs.influxdata.com/influxdb/v2.0/query-data/flux/first-last/), \u67e5\u8be2\u8fd4\u56de\u503c\u9ed8\u8ba4\u5730\u6309\u65f6\u95f4\u6392\u5e8f\u3002\u56e0\u6b64\uff0c\u5982\u679c\u4ec5\u4ec5\u662f\u8981\u6c42\u67e5\u8be2\u7ed3\u679c\u6309\u65f6\u95f4\u6392\u5e8f\uff0c\u65e0\u987b\u8c03\u7528\u6b64API\uff0c\u4f46\u662f\uff0c\u6b64API\u63d0\u4f9b\u4e86\u6309\u5176\u5b83\u5b57\u6bb5\u6392\u5e8f\u7684\u80fd\u529b\u3002 \u53e6\u5916\uff0c\u5728\u4e00\u4e2a\u67095000\u591a\u4e2atag\uff0c\u5171\u8fd4\u56de1M\u6761\u8bb0\u5f55\u7684\u6d4b\u8bd5\u4e2d\uff0c\u6d4b\u8bd5\u9a8c\u8bc1\u8fd4\u56de\u8bb0\u5f55\u786e\u5b9e\u6309_time\u5347\u5e8f\u6392\u5217\u3002 Args: by: \u6307\u5b9a\u6392\u5e8f\u7684\u5217\u540d\u79f0\u5217\u8868 Returns: Flux\u5bf9\u8c61\uff0c\u4ee5\u4fbf\u8fdb\u884c\u7ba1\u9053\u64cd\u4f5c \"\"\" if \"sort\" in self . expressions : raise DuplicateOperationError ( \"sort has been set\" ) if by is None : by = [ \"_value\" ] if isinstance ( by , str ): by = [ by ] columns_ = \",\" . join ([ f '\" { name } \"' for name in by ]) desc = \"true\" if desc else \"false\" self . 
expressions [ \"sort\" ] = f \" |> sort(columns: [ { columns_ } ], desc: { desc } )\" return self tags ( self , tags ) \u00b6 \u7ed9\u67e5\u8be2\u6dfb\u52a0tags\u8fc7\u6ee4\u6761\u4ef6 \u6b64\u67e5\u8be2\u6761\u4ef6\u4e3a\u8fc7\u6ee4\u6761\u4ef6\uff0c\u5e76\u975e\u5fc5\u987b\u3002\u5982\u679c\u67e5\u8be2\u4e2d\u6ca1\u6709\u6307\u5b9atags\uff0c\u5219\u4f1a\u8fd4\u56de\u6240\u6709\u8bb0\u5f55\u3002 \u5728\u5b9e\u73b0\u4e0a\uff0c\u65e2\u53ef\u4ee5\u4f7f\u7528 contains \u8bed\u6cd5\uff0c\u4e5f\u53ef\u4ee5\u4f7f\u7528 or \u8bed\u6cd5(\u7531\u4e8e\u4e00\u6761\u8bb0\u5f55\u53ea\u80fd\u5c5e\u4e8e\u4e00\u4e2atag\uff0c\u6240\u4ee5\uff0c\u5f53\u6307\u5b9a\u591a\u4e2atag\u8fdb\u884c\u67e5\u8be2\u65f6\uff0c\u5b83\u4eec\u4e4b\u95f4\u7684\u5173\u7cfb\u5e94\u8be5\u4e3a or )\u3002\u7ecf\u9a8c\u8bc1\uff0ccontains\u8bed\u6cd5\u4f1a\u59cb\u7ec8\u5148\u5c06\u6240\u6709\u7b26\u5408\u6761\u4ef6\u7684\u8bb0\u5f55\u68c0\u7d22\u51fa\u6765\uff0c\u518d\u8fdb\u884c\u8fc7\u6ee4\u3002\u8fd9\u6837\u7684\u6548\u7387\u6bd4\u8f83\u4f4e\uff0c\u7279\u522b\u662f\u5f53tags\u7684\u6570\u91cf\u8f83\u5c11\u65f6\uff0c\u4f1a\u8fdc\u8fdc\u6bd4\u4f7f\u7528or\u8bed\u6cd5\u6162\u3002 Exceptions: Type Description DuplicateOperationError \u4e00\u4e2a\u67e5\u8be2\u4e2d\u53ea\u5141\u8bb8\u6267\u884c\u4e00\u6b21\uff0c\u5982\u679ctag filter\u8868\u8fbe\u5f0f\u5df2\u7ecf\u5b58\u5728\uff0c\u5219\u629b\u51fa\u5f02\u5e38 Parameters: Name Type Description Default tags tags\u662f\u4e00\u4e2a{tagname: Union[str,[tag_values]]}\u5bf9\u8c61\u3002 required Examples: >>> flux = Flux () >>> flux . tags ({ \"code\" : [ \"000001\" , \"000002\" ], \"name\" : [ \"\u6d66\u53d1\u94f6\u884c\" ]}) . expressions [ \"tags\" ] ' |> filter(fn: (r) => r[\"code\"] == \"000001\" or r[\"code\"] == \"000002\" or r[\"name\"] == \"\u6d66\u53d1\u94f6\u884c\")' Returns: Type Description Flux Flux\u5bf9\u8c61\uff0c\u4ee5\u4fbf\u8fdb\u884c\u7ba1\u9053\u64cd\u4f5c Source code in omicron/dal/influx/flux.py def tags ( self , tags : DefaultDict [ str , List [ str ]]) -> \"Flux\" : \"\"\"\u7ed9\u67e5\u8be2\u6dfb\u52a0tags\u8fc7\u6ee4\u6761\u4ef6 \u6b64\u67e5\u8be2\u6761\u4ef6\u4e3a\u8fc7\u6ee4\u6761\u4ef6\uff0c\u5e76\u975e\u5fc5\u987b\u3002\u5982\u679c\u67e5\u8be2\u4e2d\u6ca1\u6709\u6307\u5b9atags\uff0c\u5219\u4f1a\u8fd4\u56de\u6240\u6709\u8bb0\u5f55\u3002 \u5728\u5b9e\u73b0\u4e0a\uff0c\u65e2\u53ef\u4ee5\u4f7f\u7528`contains`\u8bed\u6cd5\uff0c\u4e5f\u53ef\u4ee5\u4f7f\u7528`or`\u8bed\u6cd5(\u7531\u4e8e\u4e00\u6761\u8bb0\u5f55\u53ea\u80fd\u5c5e\u4e8e\u4e00\u4e2atag\uff0c\u6240\u4ee5\uff0c\u5f53\u6307\u5b9a\u591a\u4e2atag\u8fdb\u884c\u67e5\u8be2\u65f6\uff0c\u5b83\u4eec\u4e4b\u95f4\u7684\u5173\u7cfb\u5e94\u8be5\u4e3a`or`)\u3002\u7ecf\u9a8c\u8bc1\uff0ccontains\u8bed\u6cd5\u4f1a\u59cb\u7ec8\u5148\u5c06\u6240\u6709\u7b26\u5408\u6761\u4ef6\u7684\u8bb0\u5f55\u68c0\u7d22\u51fa\u6765\uff0c\u518d\u8fdb\u884c\u8fc7\u6ee4\u3002\u8fd9\u6837\u7684\u6548\u7387\u6bd4\u8f83\u4f4e\uff0c\u7279\u522b\u662f\u5f53tags\u7684\u6570\u91cf\u8f83\u5c11\u65f6\uff0c\u4f1a\u8fdc\u8fdc\u6bd4\u4f7f\u7528or\u8bed\u6cd5\u6162\u3002 Raises: DuplicateOperationError: \u4e00\u4e2a\u67e5\u8be2\u4e2d\u53ea\u5141\u8bb8\u6267\u884c\u4e00\u6b21\uff0c\u5982\u679ctag filter\u8868\u8fbe\u5f0f\u5df2\u7ecf\u5b58\u5728\uff0c\u5219\u629b\u51fa\u5f02\u5e38 Args: tags : tags\u662f\u4e00\u4e2a{tagname: Union[str,[tag_values]]}\u5bf9\u8c61\u3002 Examples: >>> flux = Flux() >>> flux.tags({\"code\": [\"000001\", \"000002\"], \"name\": [\"\u6d66\u53d1\u94f6\u884c\"]}).expressions[\"tags\"] ' |> filter(fn: (r) => r[\"code\"] == \"000001\" or 
r[\"code\"] == \"000002\" or r[\"name\"] == \"\u6d66\u53d1\u94f6\u884c\")' Returns: Flux\u5bf9\u8c61\uff0c\u4ee5\u4fbf\u8fdb\u884c\u7ba1\u9053\u64cd\u4f5c \"\"\" if \"tags\" in self . expressions : raise DuplicateOperationError ( \"tags has been set\" ) filters = [] for tag , values in tags . items (): assert ( isinstance ( values , str ) or len ( values ) > 0 ), f \"tag { tag } should not be empty or None\" if isinstance ( values , str ): values = [ values ] for v in values : filters . append ( f 'r[\" { tag } \"] == \" { v } \"' ) op_expression = \" or \" . join ( filters ) self . expressions [ \"tags\" ] = f \" |> filter(fn: (r) => { op_expression } )\" return self to_timestamp ( tm , precision = 's' ) classmethod \u00b6 \u5c06\u65f6\u95f4\u6839\u636e\u7cbe\u5ea6\u8f6c\u6362\u4e3aunix\u65f6\u95f4\u6233 \u5728\u5f80influxdb\u5199\u5165\u6570\u636e\u65f6\uff0cline-protocol\u8981\u6c42\u7684\u65f6\u95f4\u6233\u4e3aunix timestamp\uff0c\u5e76\u4e14\u4e0e\u5176\u7cbe\u5ea6\u5bf9\u5e94\u3002 influxdb\u59cb\u7ec8\u4f7f\u7528UTC\u65f6\u95f4\uff0c\u56e0\u6b64\uff0c tm \u4e5f\u5fc5\u987b\u5df2\u7ecf\u8f6c\u6362\u6210UTC\u65f6\u95f4\u3002 Parameters: Name Type Description Default tm Union[datetime.date, datetime.datetime] \u65f6\u95f4 required precision str \u65f6\u95f4\u7cbe\u5ea6\uff0c\u9ed8\u8ba4\u4e3a\u79d2\u3002 's' Returns: Type Description int \u65f6\u95f4\u6233 Source code in omicron/dal/influx/flux.py @classmethod def to_timestamp ( cls , tm : Frame , precision : str = \"s\" ) -> int : \"\"\"\u5c06\u65f6\u95f4\u6839\u636e\u7cbe\u5ea6\u8f6c\u6362\u4e3aunix\u65f6\u95f4\u6233 \u5728\u5f80influxdb\u5199\u5165\u6570\u636e\u65f6\uff0cline-protocol\u8981\u6c42\u7684\u65f6\u95f4\u6233\u4e3aunix timestamp\uff0c\u5e76\u4e14\u4e0e\u5176\u7cbe\u5ea6\u5bf9\u5e94\u3002 influxdb\u59cb\u7ec8\u4f7f\u7528UTC\u65f6\u95f4\uff0c\u56e0\u6b64\uff0c`tm`\u4e5f\u5fc5\u987b\u5df2\u7ecf\u8f6c\u6362\u6210UTC\u65f6\u95f4\u3002 Args: tm: \u65f6\u95f4 precision: \u65f6\u95f4\u7cbe\u5ea6\uff0c\u9ed8\u8ba4\u4e3a\u79d2\u3002 Returns: \u65f6\u95f4\u6233 \"\"\" if precision not in [ \"s\" , \"ms\" , \"us\" ]: raise AssertionError ( \"precision must be 's', 'ms' or 'us'\" ) # get int repr of tm, in seconds unit if isinstance ( tm , np . datetime64 ): tm = tm . astype ( \"datetime64[s]\" ) . astype ( \"int\" ) elif isinstance ( tm , datetime . datetime ): tm = tm . timestamp () else : tm = arrow . get ( tm ) . timestamp () return int ( tm * 10 ** ({ \"s\" : 0 , \"ms\" : 3 , \"us\" : 6 }[ precision ]))","title":"Flux"},{"location":"api/dal/flux/#flux---the-query-language-builder-for-influxdb","text":"Helper functions for building flux query expression Source code in omicron/dal/influx/flux.py class Flux ( object ): \"\"\"Helper functions for building flux query expression\"\"\" EPOCH_START = datetime . datetime ( 1970 , 1 , 1 , 0 , 0 , 0 ) def __init__ ( self , auto_pivot = True , no_sys_cols = True ): \"\"\"\u521d\u59cb\u5316Flux\u5bf9\u8c61 Args: auto_pivot : \u662f\u5426\u81ea\u52a8\u5c06\u67e5\u8be2\u5217\u5b57\u6bb5\u7ec4\u88c5\u6210\u884c. Defaults to True. no_sys_cols: \u662f\u5426\u81ea\u52a8\u5c06\u7cfb\u7edf\u5b57\u6bb5\u5220\u9664. Defaults to True.\u8bf7\u53c2\u8003[drop_sys_cols][omicron.dal.influx.flux.Flux.drop_sys_cols] \"\"\" self . _cols = None self . expressions = defaultdict ( list ) self . _auto_pivot = auto_pivot self . _last_n = None self . no_sys_cols = no_sys_cols def __str__ ( self ): return self . _compose () def __repr__ ( self ) -> str : return f \"< { self . __class__ . __name__ } >: \\n { self . 
_compose () } \" def _compose ( self ): \"\"\"\u5c06\u6240\u6709\u8868\u8fbe\u5f0f\u5408\u5e76\u4e3a\u4e00\u4e2a\u8868\u8fbe\u5f0f\"\"\" if not all ( [ \"bucket\" in self . expressions , \"measurement\" in self . expressions , \"range\" in self . expressions , ] ): raise AssertionError ( \"bucket, measurement and range must be set\" ) expr = [ self . expressions [ k ] for k in ( \"bucket\" , \"range\" , \"measurement\" )] if self . expressions . get ( \"tags\" ): expr . append ( self . expressions [ \"tags\" ]) if self . expressions . get ( \"fields\" ): expr . append ( self . expressions [ \"fields\" ]) if \"drop\" not in self . expressions and self . no_sys_cols : self . drop_sys_cols () if self . expressions . get ( \"drop\" ): expr . append ( self . expressions [ \"drop\" ]) if self . _auto_pivot and \"pivot\" not in self . expressions : self . pivot () if self . expressions . get ( \"pivot\" ): expr . append ( self . expressions [ \"pivot\" ]) if self . expressions . get ( \"group\" ): expr . append ( self . expressions [ \"group\" ]) if self . expressions . get ( \"sort\" ): expr . append ( self . expressions [ \"sort\" ]) if self . expressions . get ( \"limit\" ): expr . append ( self . expressions [ \"limit\" ]) # influxdb\u9ed8\u8ba4\u6309\u5347\u5e8f\u6392\u5217\uff0c\u4f46last_n\u67e5\u8be2\u7684\u7ed3\u679c\u5219\u5fc5\u7136\u662f\u964d\u5e8f\u7684\uff0c\u6240\u4ee5\u8fd8\u9700\u8981\u518d\u6b21\u6392\u5e8f if self . _last_n : expr . append ( \" \\n \" . join ( [ f ' |> top(n: { self . _last_n } , columns: [\"_time\"])' , ' |> sort(columns: [\"_time\"], desc: false)' , ] ) ) return \" \\n \" . join ( expr ) def bucket ( self , bucket : str ) -> \"Flux\" : \"\"\"add bucket to query expression Raises: DuplicateOperationError: \u4e00\u4e2a\u67e5\u8be2\u4e2d\u53ea\u5141\u8bb8\u6307\u5b9a\u4e00\u4e2asource\uff0c\u5982\u679c\u8868\u8fbe\u5f0f\u4e2d\u5df2\u7ecf\u6307\u5b9a\u4e86bucket\uff0c\u5219\u629b\u51fa\u5f02\u5e38 Returns: Flux\u5bf9\u8c61 \"\"\" if \"bucket\" in self . expressions : raise DuplicateOperationError ( \"bucket has been set\" ) self . expressions [ \"bucket\" ] = f 'from(bucket: \" { bucket } \")' return self def measurement ( self , measurement : str ) -> \"Flux\" : \"\"\"add measurement filter to query Raises: DuplicateOperationError: \u4e00\u6b21\u67e5\u8be2\u4e2d\u53ea\u5141\u8bb8\u6307\u5b9a\u4e00\u4e2ameasurement, \u5982\u679c\u8868\u8fbe\u5f0f\u4e2d\u5df2\u7ecf\u5b58\u5728measurement, \u5219\u629b\u51fa\u5f02\u5e38 Returns: Flux\u5bf9\u8c61\u81ea\u8eab\uff0c\u4ee5\u4fbf\u8fdb\u884c\u7ba1\u9053\u64cd\u4f5c \"\"\" if \"measurement\" in self . expressions : raise DuplicateOperationError ( \"measurement has been set\" ) self . 
expressions [ \"measurement\" ] = f ' |> filter(fn: (r) => r[\"_measurement\"] == \" { measurement } \")' return self def range ( self , start : Frame , end : Frame , right_close = True , precision = \"s\" ) -> \"Flux\" : \"\"\"\u6dfb\u52a0\u65f6\u95f4\u8303\u56f4\u8fc7\u6ee4 \u5fc5\u987b\u6307\u5b9a\u7684\u67e5\u8be2\u6761\u4ef6\uff0c\u5426\u5219influxdb\u4f1a\u62a5unbound\u67e5\u8be2\u9519\uff0c\u56e0\u4e3a\u8fd9\u79cd\u60c5\u51b5\u4e0b\uff0c\u8fd4\u56de\u7684\u6570\u636e\u91cf\u5c06\u975e\u5e38\u5927\u3002 \u5728\u683c\u5f0f\u5316\u65f6\u95f4\u65f6\uff0c\u9700\u8981\u6839\u636e`precision`\u751f\u6210\u65f6\u95f4\u5b57\u7b26\u4e32\u3002\u5728\u5411Influxdb\u53d1\u9001\u8bf7\u6c42\u65f6\uff0c\u5e94\u8be5\u6ce8\u610f\u67e5\u8be2\u53c2\u6570\u4e2d\u6307\u5b9a\u7684\u65f6\u95f4\u7cbe\u5ea6\u4e0e\u8fd9\u91cc\u4f7f\u7528\u7684\u4fdd\u6301\u4e00\u81f4\u3002 Influxdb\u7684\u67e5\u8be2\u7ed3\u679c\u9ed8\u8ba4\u4e0d\u5305\u542b\u7ed3\u675f\u65f6\u95f4\uff0c\u5f53`right_close`\u6307\u5b9a\u4e3aTrue\u65f6\uff0c\u6211\u4eec\u5c06\u6839\u636e\u6307\u5b9a\u7684\u7cbe\u5ea6\u4fee\u6539`end`\u65f6\u95f4\uff0c\u4f7f\u4e4b\u4ec5\u6bd4`end`\u591a\u4e00\u4e2a\u65f6\u95f4\u5355\u4f4d\uff0c\u4ece\u800c\u4fdd\u8bc1\u67e5\u8be2\u7ed3\u679c\u4f1a\u5305\u542b`end`\u3002 Raises: DuplicateOperationError: \u4e00\u4e2a\u67e5\u8be2\u4e2d\u53ea\u5141\u8bb8\u6307\u5b9a\u4e00\u6b21\u65f6\u95f4\u8303\u56f4\uff0c\u5982\u679crange\u8868\u8fbe\u5f0f\u5df2\u7ecf\u5b58\u5728\uff0c\u5219\u629b\u51fa\u5f02\u5e38 Args: start: \u5f00\u59cb\u65f6\u95f4 end: \u7ed3\u675f\u65f6\u95f4 right_close: \u67e5\u8be2\u7ed3\u679c\u662f\u5426\u5305\u542b\u7ed3\u675f\u65f6\u95f4\u3002 precision: \u65f6\u95f4\u7cbe\u5ea6\uff0c\u9ed8\u8ba4\u4e3a\u79d2\u3002 Returns: Flux\u5bf9\u8c61\uff0c\u4ee5\u652f\u6301\u7ba1\u9053\u64cd\u4f5c \"\"\" if \"range\" in self . expressions : raise DuplicateOperationError ( \"range has been set\" ) if precision not in [ \"s\" , \"ms\" , \"us\" ]: raise AssertionError ( \"precision must be 's', 'ms' or 'us'\" ) end = self . format_time ( end , precision , right_close ) start = self . format_time ( start , precision ) self . expressions [ \"range\" ] = f \" |> range(start: { start } , stop: { end } )\" return self def limit ( self , limit : int ) -> \"Flux\" : \"\"\"\u6dfb\u52a0\u8fd4\u56de\u8bb0\u5f55\u6570\u9650\u5236 Raises: DuplicateOperationError: \u4e00\u4e2a\u67e5\u8be2\u4e2d\u53ea\u5141\u8bb8\u6307\u5b9a\u4e00\u6b21limit\uff0c\u5982\u679climit\u8868\u8fbe\u5f0f\u5df2\u7ecf\u5b58\u5728\uff0c\u5219\u629b\u51fa\u5f02\u5e38 Args: limit: \u8fd4\u56de\u8bb0\u5f55\u6570\u9650\u5236 Returns: Flux\u5bf9\u8c61\uff0c\u4ee5\u4fbf\u8fdb\u884c\u7ba1\u9053\u64cd\u4f5c \"\"\" if \"limit\" in self . expressions : raise DuplicateOperationError ( \"limit has been set\" ) self . 
expressions [ \"limit\" ] = \" |> limit(n: %d )\" % limit return self @classmethod def to_timestamp ( cls , tm : Frame , precision : str = \"s\" ) -> int : \"\"\"\u5c06\u65f6\u95f4\u6839\u636e\u7cbe\u5ea6\u8f6c\u6362\u4e3aunix\u65f6\u95f4\u6233 \u5728\u5f80influxdb\u5199\u5165\u6570\u636e\u65f6\uff0cline-protocol\u8981\u6c42\u7684\u65f6\u95f4\u6233\u4e3aunix timestamp\uff0c\u5e76\u4e14\u4e0e\u5176\u7cbe\u5ea6\u5bf9\u5e94\u3002 influxdb\u59cb\u7ec8\u4f7f\u7528UTC\u65f6\u95f4\uff0c\u56e0\u6b64\uff0c`tm`\u4e5f\u5fc5\u987b\u5df2\u7ecf\u8f6c\u6362\u6210UTC\u65f6\u95f4\u3002 Args: tm: \u65f6\u95f4 precision: \u65f6\u95f4\u7cbe\u5ea6\uff0c\u9ed8\u8ba4\u4e3a\u79d2\u3002 Returns: \u65f6\u95f4\u6233 \"\"\" if precision not in [ \"s\" , \"ms\" , \"us\" ]: raise AssertionError ( \"precision must be 's', 'ms' or 'us'\" ) # get int repr of tm, in seconds unit if isinstance ( tm , np . datetime64 ): tm = tm . astype ( \"datetime64[s]\" ) . astype ( \"int\" ) elif isinstance ( tm , datetime . datetime ): tm = tm . timestamp () else : tm = arrow . get ( tm ) . timestamp () return int ( tm * 10 ** ({ \"s\" : 0 , \"ms\" : 3 , \"us\" : 6 }[ precision ])) @classmethod def format_time ( cls , tm : Frame , precision : str = \"s\" , shift_forward = False ) -> str : \"\"\"\u5c06\u65f6\u95f4\u8f6c\u6362\u6210\u5ba2\u6237\u7aef\u5bf9\u5e94\u7684\u7cbe\u5ea6\uff0c\u5e76\u4ee5 RFC3339 timestamps\u683c\u5f0f\u4e32\uff08\u5373influxdb\u8981\u6c42\u7684\u683c\u5f0f\uff09\u8fd4\u56de\u3002 \u5982\u679c\u8fd9\u4e2a\u65f6\u95f4\u662f\u4f5c\u4e3a\u67e5\u8be2\u7684range\u4e2d\u7684\u7ed3\u675f\u65f6\u95f4\u4f7f\u7528\u65f6\uff0c\u7531\u4e8einflux\u67e5\u8be2\u7684\u65f6\u95f4\u8303\u56f4\u662f\u5de6\u95ed\u53f3\u5f00\u7684\uff0c\u56e0\u6b64\u5982\u679c\u4f60\u9700\u8981\u67e5\u8be2\u7684\u662f\u4e00\u4e2a\u95ed\u533a\u95f4\uff0c\u5219\u9700\u8981\u5c06`end`\u7684\u65f6\u95f4\u5411\u524d\u504f\u79fb\u4e00\u4e2a\u7cbe\u5ea6\u3002\u901a\u8fc7\u4f20\u5165`shift_forward = True`\u53ef\u4ee5\u5b8c\u6210\u8fd9\u79cd\u8f6c\u6362\u3002 Examples: >>> # by default, the precision is seconds, and convert a date >>> Flux.format_time(datetime.date(2019, 1, 1)) '2019-01-01T00:00:00Z' >>> # set precision to ms, convert a time >>> Flux.format_time(datetime.datetime(1978, 7, 8, 12, 34, 56, 123456), precision=\"ms\") '1978-07-08T12:34:56.123Z' >>> # convert and forward shift >>> Flux.format_time(datetime.date(1978, 7, 8), shift_forward = True) '1978-07-08T00:00:01Z' Args: tm : \u5f85\u683c\u5f0f\u5316\u7684\u65f6\u95f4 precision: \u65f6\u95f4\u7cbe\u5ea6\uff0c\u53ef\u9009\u503c\u4e3a\uff1a's', 'ms', 'us' shift_forward: \u5982\u679c\u4e3aTrue\uff0c\u5219\u5c06end\u5411\u524d\u504f\u79fb\u4e00\u4e2a\u7cbe\u5ea6 Returns: \u8c03\u6574\u540e\u7b26\u5408influx\u65f6\u95f4\u89c4\u8303\u7684\u65f6\u95f4\uff08\u5b57\u7b26\u4e32\u8868\u793a\uff09 \"\"\" timespec = { \"s\" : \"seconds\" , \"ms\" : \"milliseconds\" , \"us\" : \"microseconds\" } . get ( precision ) if timespec is None : raise ValueError ( f \"precision must be one of 's', 'ms', 'us', but got { precision } \" ) tm = arrow . get ( tm ) . naive if shift_forward : tm = tm + datetime . timedelta ( ** { timespec : 1 }) return tm . 
isoformat ( sep = \"T\" , timespec = timespec ) + \"Z\" def tags ( self , tags : DefaultDict [ str , List [ str ]]) -> \"Flux\" : \"\"\"\u7ed9\u67e5\u8be2\u6dfb\u52a0tags\u8fc7\u6ee4\u6761\u4ef6 \u6b64\u67e5\u8be2\u6761\u4ef6\u4e3a\u8fc7\u6ee4\u6761\u4ef6\uff0c\u5e76\u975e\u5fc5\u987b\u3002\u5982\u679c\u67e5\u8be2\u4e2d\u6ca1\u6709\u6307\u5b9atags\uff0c\u5219\u4f1a\u8fd4\u56de\u6240\u6709\u8bb0\u5f55\u3002 \u5728\u5b9e\u73b0\u4e0a\uff0c\u65e2\u53ef\u4ee5\u4f7f\u7528`contains`\u8bed\u6cd5\uff0c\u4e5f\u53ef\u4ee5\u4f7f\u7528`or`\u8bed\u6cd5(\u7531\u4e8e\u4e00\u6761\u8bb0\u5f55\u53ea\u80fd\u5c5e\u4e8e\u4e00\u4e2atag\uff0c\u6240\u4ee5\uff0c\u5f53\u6307\u5b9a\u591a\u4e2atag\u8fdb\u884c\u67e5\u8be2\u65f6\uff0c\u5b83\u4eec\u4e4b\u95f4\u7684\u5173\u7cfb\u5e94\u8be5\u4e3a`or`)\u3002\u7ecf\u9a8c\u8bc1\uff0ccontains\u8bed\u6cd5\u4f1a\u59cb\u7ec8\u5148\u5c06\u6240\u6709\u7b26\u5408\u6761\u4ef6\u7684\u8bb0\u5f55\u68c0\u7d22\u51fa\u6765\uff0c\u518d\u8fdb\u884c\u8fc7\u6ee4\u3002\u8fd9\u6837\u7684\u6548\u7387\u6bd4\u8f83\u4f4e\uff0c\u7279\u522b\u662f\u5f53tags\u7684\u6570\u91cf\u8f83\u5c11\u65f6\uff0c\u4f1a\u8fdc\u8fdc\u6bd4\u4f7f\u7528or\u8bed\u6cd5\u6162\u3002 Raises: DuplicateOperationError: \u4e00\u4e2a\u67e5\u8be2\u4e2d\u53ea\u5141\u8bb8\u6267\u884c\u4e00\u6b21\uff0c\u5982\u679ctag filter\u8868\u8fbe\u5f0f\u5df2\u7ecf\u5b58\u5728\uff0c\u5219\u629b\u51fa\u5f02\u5e38 Args: tags : tags\u662f\u4e00\u4e2a{tagname: Union[str,[tag_values]]}\u5bf9\u8c61\u3002 Examples: >>> flux = Flux() >>> flux.tags({\"code\": [\"000001\", \"000002\"], \"name\": [\"\u6d66\u53d1\u94f6\u884c\"]}).expressions[\"tags\"] ' |> filter(fn: (r) => r[\"code\"] == \"000001\" or r[\"code\"] == \"000002\" or r[\"name\"] == \"\u6d66\u53d1\u94f6\u884c\")' Returns: Flux\u5bf9\u8c61\uff0c\u4ee5\u4fbf\u8fdb\u884c\u7ba1\u9053\u64cd\u4f5c \"\"\" if \"tags\" in self . expressions : raise DuplicateOperationError ( \"tags has been set\" ) filters = [] for tag , values in tags . items (): assert ( isinstance ( values , str ) or len ( values ) > 0 ), f \"tag { tag } should not be empty or None\" if isinstance ( values , str ): values = [ values ] for v in values : filters . append ( f 'r[\" { tag } \"] == \" { v } \"' ) op_expression = \" or \" . join ( filters ) self . expressions [ \"tags\" ] = f \" |> filter(fn: (r) => { op_expression } )\" return self def fields ( self , fields : List , reserve_time_stamp : bool = True ) -> \"Flux\" : \"\"\"\u7ed9\u67e5\u8be2\u6dfb\u52a0field\u8fc7\u6ee4\u6761\u4ef6 \u6b64\u67e5\u8be2\u6761\u4ef6\u4e3a\u8fc7\u6ee4\u6761\u4ef6\uff0c\u7528\u4ee5\u6307\u5b9a\u54ea\u4e9bfield\u4f1a\u51fa\u73b0\u5728\u67e5\u8be2\u7ed3\u679c\u4e2d\uff0c\u5e76\u975e\u5fc5\u987b\u3002\u5982\u679c\u67e5\u8be2\u4e2d\u6ca1\u6709\u6307\u5b9atags\uff0c\u5219\u4f1a\u8fd4\u56de\u6240\u6709\u8bb0\u5f55\u3002 \u7531\u4e8e\u4e00\u6761\u8bb0\u5f55\u53ea\u80fd\u5c5e\u4e8e\u4e00\u4e2a_field\uff0c\u6240\u4ee5\uff0c\u5f53\u6307\u5b9a\u591a\u4e2a_field\u8fdb\u884c\u67e5\u8be2\u65f6\uff0c\u5b83\u4eec\u4e4b\u95f4\u7684\u5173\u7cfb\u5e94\u8be5\u4e3a`or`\u3002 Raises: DuplicateOperationError: \u4e00\u4e2a\u67e5\u8be2\u4e2d\u53ea\u5141\u8bb8\u6267\u884c\u4e00\u6b21\uff0c\u5982\u679cfiled filter\u8868\u8fbe\u5f0f\u5df2\u7ecf\u5b58\u5728\uff0c\u5219\u629b\u51fa\u5f02\u5e38 Args: fields: \u5f85\u67e5\u8be2\u7684field\u5217\u8868 reserve_time_stamp: \u662f\u5426\u4fdd\u7559\u65f6\u95f4\u6233`_time`\uff0c\u9ed8\u8ba4\u4e3aTrue Returns: Flux\u5bf9\u8c61\uff0c\u4ee5\u4fbf\u8fdb\u884c\u7ba1\u9053\u64cd\u4f5c \"\"\" if \"fields\" in self . 
expressions : raise DuplicateOperationError ( \"fields has been set\" ) self . _cols = fields . copy () if reserve_time_stamp and \"_time\" not in self . _cols : self . _cols . append ( \"_time\" ) self . _cols = sorted ( self . _cols ) filters = [ f 'r[\"_field\"] == \" { name } \"' for name in self . _cols ] self . expressions [ \"fields\" ] = f \" |> filter(fn: (r) => { ' or ' . join ( filters ) } )\" return self def pivot ( self , row_keys : List [ str ] = [ \"_time\" ], column_keys = [ \"_field\" ], value_column : str = \"_value\" , ) -> \"Flux\" : \"\"\"pivot\u7528\u6765\u5c06\u4ee5\u5217\u4e3a\u5355\u4f4d\u7684\u6570\u636e\u8f6c\u6362\u4e3a\u4ee5\u884c\u4e3a\u5355\u4f4d\u7684\u6570\u636e Flux\u67e5\u8be2\u8fd4\u56de\u7684\u7ed3\u679c\u901a\u5e38\u90fd\u662f\u4ee5\u5217\u4e3a\u5355\u4f4d\u7684\u6570\u636e\uff0c\u589e\u52a0\u672cpivot\u6761\u4ef6\u540e\uff0c\u7ed3\u679c\u5c06\u88ab\u8f6c\u6362\u6210\u4e3a\u4ee5\u884c\u4e3a\u5355\u4f4d\u7684\u6570\u636e\u518d\u8fd4\u56de\u3002 \u8fd9\u91cc\u5b9e\u73b0\u7684\u662fmeasurement\u5185\u7684\u8f6c\u6362\uff0c\u8bf7\u53c2\u8003 [pivot](https://docs.influxdata.com/flux/v0.x/stdlib/universe/pivot/#align-fields-within-each-measurement-that-have-the-same-timestamp) Args: row_keys: \u60df\u4e00\u786e\u5b9a\u8f93\u51fa\u4e2d\u4e00\u884c\u6570\u636e\u7684\u5217\u540d\u5b57, \u9ed8\u8ba4\u4e3a[\"_time\"] column_keys: \u5217\u540d\u79f0\u5217\u8868\uff0c\u9ed8\u8ba4\u4e3a[\"_field\"] value_column: \u503c\u5217\u540d\uff0c\u9ed8\u8ba4\u4e3a\"_value\" Returns: Flux\u5bf9\u8c61\uff0c\u4ee5\u4fbf\u8fdb\u884c\u7ba1\u9053\u64cd\u4f5c \"\"\" if \"pivot\" in self . expressions : raise DuplicateOperationError ( \"pivot has been set\" ) columns = \",\" . join ([ f '\" { name } \"' for name in column_keys ]) rowkeys = \",\" . join ([ f '\" { name } \"' for name in row_keys ]) self . expressions [ \"pivot\" ] = f ' |> pivot(columnKey: [ { columns } ], rowKey: [ { rowkeys } ], valueColumn: \" { value_column } \")' return self def sort ( self , by : List [ str ] = None , desc : bool = False ) -> \"Flux\" : \"\"\"\u6309\u7167\u6307\u5b9a\u7684\u5217\u8fdb\u884c\u6392\u5e8f \u6839\u636e[influxdb doc](https://docs.influxdata.com/influxdb/v2.0/query-data/flux/first-last/), \u67e5\u8be2\u8fd4\u56de\u503c\u9ed8\u8ba4\u5730\u6309\u65f6\u95f4\u6392\u5e8f\u3002\u56e0\u6b64\uff0c\u5982\u679c\u4ec5\u4ec5\u662f\u8981\u6c42\u67e5\u8be2\u7ed3\u679c\u6309\u65f6\u95f4\u6392\u5e8f\uff0c\u65e0\u987b\u8c03\u7528\u6b64API\uff0c\u4f46\u662f\uff0c\u6b64API\u63d0\u4f9b\u4e86\u6309\u5176\u5b83\u5b57\u6bb5\u6392\u5e8f\u7684\u80fd\u529b\u3002 \u53e6\u5916\uff0c\u5728\u4e00\u4e2a\u67095000\u591a\u4e2atag\uff0c\u5171\u8fd4\u56de1M\u6761\u8bb0\u5f55\u7684\u6d4b\u8bd5\u4e2d\uff0c\u6d4b\u8bd5\u9a8c\u8bc1\u8fd4\u56de\u8bb0\u5f55\u786e\u5b9e\u6309_time\u5347\u5e8f\u6392\u5217\u3002 Args: by: \u6307\u5b9a\u6392\u5e8f\u7684\u5217\u540d\u79f0\u5217\u8868 Returns: Flux\u5bf9\u8c61\uff0c\u4ee5\u4fbf\u8fdb\u884c\u7ba1\u9053\u64cd\u4f5c \"\"\" if \"sort\" in self . expressions : raise DuplicateOperationError ( \"sort has been set\" ) if by is None : by = [ \"_value\" ] if isinstance ( by , str ): by = [ by ] columns_ = \",\" . join ([ f '\" { name } \"' for name in by ]) desc = \"true\" if desc else \"false\" self . expressions [ \"sort\" ] = f \" |> sort(columns: [ { columns_ } ], desc: { desc } )\" return self def group ( self , by : Tuple [ str ]) -> \"Flux\" : \"\"\"[summary] Returns: [description] \"\"\" if \"group\" in self . 
expressions : raise DuplicateOperationError ( \"group has been set\" ) if isinstance ( by , str ): by = [ by ] cols = \",\" . join ([ f '\" { col } \"' for col in by ]) self . expressions [ \"group\" ] = f \" |> group(columns: [ { cols } ])\" return self def latest ( self , n : int ) -> \"Flux\" : \"\"\"\u83b7\u53d6\u6700\u540en\u6761\u6570\u636e\uff0c\u6309\u65f6\u95f4\u589e\u5e8f\u8fd4\u56de Flux\u67e5\u8be2\u7684\u589e\u5f3a\u529f\u80fd\uff0c\u76f8\u5f53\u4e8etop + sort + limit Args: n: \u6700\u540en\u6761\u6570\u636e Returns: Flux\u5bf9\u8c61\uff0c\u4ee5\u4fbf\u8fdb\u884c\u7ba1\u9053\u64cd\u4f5c \"\"\" assert \"top\" not in self . expressions , \"top and last_n can not be used together\" assert ( \"sort\" not in self . expressions ), \"sort and last_n can not be used together\" assert ( \"limit\" not in self . expressions ), \"limit and last_n can not be used together\" self . _last_n = n return self @property def cols ( self ) -> List [ str ]: \"\"\"the columns or the return records the implementation is buggy. Influx doesn't tell us in which order these columns are. Returns: the columns name of the return records \"\"\" # fixme: if keep in expression, then return group key + tag key + value key # if keep not in expression, then stream, table, _time, ... return sorted ( self . _cols ) def delete ( self , measurement : str , stop : datetime . datetime , tags : dict = {}, start : datetime . datetime = None , precision : str = \"s\" , ) -> dict : \"\"\"\u6784\u5efa\u5220\u9664\u8bed\u53e5\u3002 according to [delete-predicate](https://docs.influxdata.com/influxdb/v2.1/reference/syntax/delete-predicate/), delete\u53ea\u652f\u6301AND\u903b\u8f91\u64cd\u4f5c\uff0c\u53ea\u652f\u6301\u201c=\u201d\u64cd\u4f5c\uff0c\u4e0d\u652f\u6301\u201c\uff01=\u201d\u64cd\u4f5c\uff0c\u53ef\u4ee5\u4f7f\u7528\u4efb\u4f55\u5b57\u6bb5\u6216\u8005tag\uff0c\u4f46\u4e0d\u5305\u62ec_time\u548c_value\u5b57\u6bb5\u3002 \u7531\u4e8einfluxdb\u8fd9\u4e00\u6bb5\u6587\u6863\u4e0d\u662f\u5f88\u6e05\u695a\uff0c\u6839\u636e\u8bd5\u9a8c\u7ed3\u679c\uff0c\u76ee\u524d\u4ec5\u652f\u6301\u6309\u65f6\u95f4\u8303\u56f4\u548ctags\u8fdb\u884c\u5220\u9664\u8f83\u597d\u3002\u5982\u679c\u67d0\u4e2acolumn\u7684\u503c\u7c7b\u578b\u662f\u5b57\u7b26\u4e32\uff0c\u5219\u4e5f\u53ef\u4ee5\u901a\u8fc7`tags`\u53c2\u6570\u4f20\u5165\uff0c\u5339\u914d\u540e\u5220\u9664\u3002\u4f46\u5982\u679c\u4f20\u5165\u4e86\u975e\u5b57\u7b26\u4e32\u7c7b\u578b\u7684column\uff0c\u5219\u5c06\u5f97\u5230\u65e0\u6cd5\u9884\u6599\u7684\u7ed3\u679c\u3002 Args: measurement : [description] stop : [description] tags : \u6309tags\u548c\u5339\u914d\u7684\u503c\u8fdb\u884c\u5220\u9664\u3002\u4f20\u5165\u7684tags\u4e2d\uff0ckey\u4e3atag\u540d\u79f0\uff0cvalue\u4e3atag\u8981\u5339\u914d\u7684\u53d6\u503c\uff0c\u53ef\u4ee5\u4e3astr\u6216\u8005List[str]\u3002 start : \u8d77\u59cb\u65f6\u95f4\u3002\u5982\u679c\u7701\u7565\uff0c\u5219\u4f7f\u7528EPOCH_START. precision : \u65f6\u95f4\u7cbe\u5ea6\u3002\u53ef\u4ee5\u4e3a\u201cs\u201d\uff0c\u201cms\u201d\uff0c\u201cus\u201d Returns: \u5220\u9664\u8bed\u53e5 \"\"\" timespec = { \"s\" : \"seconds\" , \"ms\" : \"milliseconds\" , \"us\" : \"microseconds\" } . get ( precision ) if start is None : start = self . EPOCH_START . isoformat ( timespec = timespec ) + \"Z\" predicate = [ f '_measurement=\" { measurement } \"' ] for key , value in tags . items (): if isinstance ( value , list ): predicate . extend ([ f ' { key } = \" { v } \"' for v in value ]) else : predicate . 
append ( f ' { key } = \" { value } \"' ) command = { \"start\" : start , \"stop\" : f \" { stop . isoformat ( timespec = timespec ) } Z\" , \"predicate\" : \" AND \" . join ( predicate ), } return command def drop ( self , cols : List [ str ]) -> \"Flux\" : \"\"\"use this to drop columns before return result Args: cols : the name of columns to be dropped Returns: Flux object, to support pipe operation \"\"\" if \"drop\" in self . expressions : raise DuplicateOperationError ( \"drop operation has been set already\" ) # add surrounding quotes _cols = [ f '\" { c } \"' for c in cols ] self . expressions [ \"drop\" ] = f \" |> drop(columns: [ { ',' . join ( _cols ) } ])\" return self def drop_sys_cols ( self , cols : List [ str ] = None ) -> \"Flux\" : \"\"\"use this to drop [\"_start\", \"_stop\", \"_measurement\"], plus columns specified in `cols`, before return query result please be noticed, after drop sys columns, there's still two sys columns left, which is \"_time\" and \"table\", and \"_time\" should usually be kept, \"table\" is one we're not able to removed. If you don't like _time in return result, you can specify it in `cols` parameter. Args: cols : the extra columns to be dropped Returns: Flux query object \"\"\" _cols = [ \"_start\" , \"_stop\" , \"_measurement\" ] if cols is not None : _cols . extend ( cols ) return self . drop ( _cols )","title":"Flux - the query language builder for influxdb"},{"location":"api/dal/flux/#omicron.dal.influx.flux.Flux.cols","text":"the columns or the return records the implementation is buggy. Influx doesn't tell us in which order these columns are. Returns: Type Description List[str] the columns name of the return records","title":"cols"},{"location":"api/dal/flux/#omicron.dal.influx.flux.Flux.__init__","text":"\u521d\u59cb\u5316Flux\u5bf9\u8c61 Parameters: Name Type Description Default auto_pivot \u662f\u5426\u81ea\u52a8\u5c06\u67e5\u8be2\u5217\u5b57\u6bb5\u7ec4\u88c5\u6210\u884c. Defaults to True. True no_sys_cols \u662f\u5426\u81ea\u52a8\u5c06\u7cfb\u7edf\u5b57\u6bb5\u5220\u9664. Defaults to True.\u8bf7\u53c2\u8003 drop_sys_cols True Source code in omicron/dal/influx/flux.py def __init__ ( self , auto_pivot = True , no_sys_cols = True ): \"\"\"\u521d\u59cb\u5316Flux\u5bf9\u8c61 Args: auto_pivot : \u662f\u5426\u81ea\u52a8\u5c06\u67e5\u8be2\u5217\u5b57\u6bb5\u7ec4\u88c5\u6210\u884c. Defaults to True. no_sys_cols: \u662f\u5426\u81ea\u52a8\u5c06\u7cfb\u7edf\u5b57\u6bb5\u5220\u9664. Defaults to True.\u8bf7\u53c2\u8003[drop_sys_cols][omicron.dal.influx.flux.Flux.drop_sys_cols] \"\"\" self . _cols = None self . expressions = defaultdict ( list ) self . _auto_pivot = auto_pivot self . _last_n = None self . no_sys_cols = no_sys_cols","title":"__init__()"},{"location":"api/dal/flux/#omicron.dal.influx.flux.Flux.bucket","text":"add bucket to query expression Exceptions: Type Description DuplicateOperationError \u4e00\u4e2a\u67e5\u8be2\u4e2d\u53ea\u5141\u8bb8\u6307\u5b9a\u4e00\u4e2asource\uff0c\u5982\u679c\u8868\u8fbe\u5f0f\u4e2d\u5df2\u7ecf\u6307\u5b9a\u4e86bucket\uff0c\u5219\u629b\u51fa\u5f02\u5e38 Returns: Type Description Flux Flux\u5bf9\u8c61 Source code in omicron/dal/influx/flux.py def bucket ( self , bucket : str ) -> \"Flux\" : \"\"\"add bucket to query expression Raises: DuplicateOperationError: \u4e00\u4e2a\u67e5\u8be2\u4e2d\u53ea\u5141\u8bb8\u6307\u5b9a\u4e00\u4e2asource\uff0c\u5982\u679c\u8868\u8fbe\u5f0f\u4e2d\u5df2\u7ecf\u6307\u5b9a\u4e86bucket\uff0c\u5219\u629b\u51fa\u5f02\u5e38 Returns: Flux\u5bf9\u8c61 \"\"\" if \"bucket\" in self . 
expressions : raise DuplicateOperationError ( \"bucket has been set\" ) self . expressions [ \"bucket\" ] = f 'from(bucket: \" { bucket } \")' return self","title":"bucket()"},{"location":"api/dal/flux/#omicron.dal.influx.flux.Flux.delete","text":"\u6784\u5efa\u5220\u9664\u8bed\u53e5\u3002 according to delete-predicate , delete\u53ea\u652f\u6301AND\u903b\u8f91\u64cd\u4f5c\uff0c\u53ea\u652f\u6301\u201c=\u201d\u64cd\u4f5c\uff0c\u4e0d\u652f\u6301\u201c\uff01=\u201d\u64cd\u4f5c\uff0c\u53ef\u4ee5\u4f7f\u7528\u4efb\u4f55\u5b57\u6bb5\u6216\u8005tag\uff0c\u4f46\u4e0d\u5305\u62ec_time\u548c_value\u5b57\u6bb5\u3002 \u7531\u4e8einfluxdb\u8fd9\u4e00\u6bb5\u6587\u6863\u4e0d\u662f\u5f88\u6e05\u695a\uff0c\u6839\u636e\u8bd5\u9a8c\u7ed3\u679c\uff0c\u76ee\u524d\u4ec5\u652f\u6301\u6309\u65f6\u95f4\u8303\u56f4\u548ctags\u8fdb\u884c\u5220\u9664\u8f83\u597d\u3002\u5982\u679c\u67d0\u4e2acolumn\u7684\u503c\u7c7b\u578b\u662f\u5b57\u7b26\u4e32\uff0c\u5219\u4e5f\u53ef\u4ee5\u901a\u8fc7 tags \u53c2\u6570\u4f20\u5165\uff0c\u5339\u914d\u540e\u5220\u9664\u3002\u4f46\u5982\u679c\u4f20\u5165\u4e86\u975e\u5b57\u7b26\u4e32\u7c7b\u578b\u7684column\uff0c\u5219\u5c06\u5f97\u5230\u65e0\u6cd5\u9884\u6599\u7684\u7ed3\u679c\u3002 Parameters: Name Type Description Default measurement [description] required stop [description] required tags \u6309tags\u548c\u5339\u914d\u7684\u503c\u8fdb\u884c\u5220\u9664\u3002\u4f20\u5165\u7684tags\u4e2d\uff0ckey\u4e3atag\u540d\u79f0\uff0cvalue\u4e3atag\u8981\u5339\u914d\u7684\u53d6\u503c\uff0c\u53ef\u4ee5\u4e3astr\u6216\u8005List[str]\u3002 {} start \u8d77\u59cb\u65f6\u95f4\u3002\u5982\u679c\u7701\u7565\uff0c\u5219\u4f7f\u7528EPOCH_START. None precision \u65f6\u95f4\u7cbe\u5ea6\u3002\u53ef\u4ee5\u4e3a\u201cs\u201d\uff0c\u201cms\u201d\uff0c\u201cus\u201d 's' Returns: Type Description dict \u5220\u9664\u8bed\u53e5 Source code in omicron/dal/influx/flux.py def delete ( self , measurement : str , stop : datetime . datetime , tags : dict = {}, start : datetime . datetime = None , precision : str = \"s\" , ) -> dict : \"\"\"\u6784\u5efa\u5220\u9664\u8bed\u53e5\u3002 according to [delete-predicate](https://docs.influxdata.com/influxdb/v2.1/reference/syntax/delete-predicate/), delete\u53ea\u652f\u6301AND\u903b\u8f91\u64cd\u4f5c\uff0c\u53ea\u652f\u6301\u201c=\u201d\u64cd\u4f5c\uff0c\u4e0d\u652f\u6301\u201c\uff01=\u201d\u64cd\u4f5c\uff0c\u53ef\u4ee5\u4f7f\u7528\u4efb\u4f55\u5b57\u6bb5\u6216\u8005tag\uff0c\u4f46\u4e0d\u5305\u62ec_time\u548c_value\u5b57\u6bb5\u3002 \u7531\u4e8einfluxdb\u8fd9\u4e00\u6bb5\u6587\u6863\u4e0d\u662f\u5f88\u6e05\u695a\uff0c\u6839\u636e\u8bd5\u9a8c\u7ed3\u679c\uff0c\u76ee\u524d\u4ec5\u652f\u6301\u6309\u65f6\u95f4\u8303\u56f4\u548ctags\u8fdb\u884c\u5220\u9664\u8f83\u597d\u3002\u5982\u679c\u67d0\u4e2acolumn\u7684\u503c\u7c7b\u578b\u662f\u5b57\u7b26\u4e32\uff0c\u5219\u4e5f\u53ef\u4ee5\u901a\u8fc7`tags`\u53c2\u6570\u4f20\u5165\uff0c\u5339\u914d\u540e\u5220\u9664\u3002\u4f46\u5982\u679c\u4f20\u5165\u4e86\u975e\u5b57\u7b26\u4e32\u7c7b\u578b\u7684column\uff0c\u5219\u5c06\u5f97\u5230\u65e0\u6cd5\u9884\u6599\u7684\u7ed3\u679c\u3002 Args: measurement : [description] stop : [description] tags : \u6309tags\u548c\u5339\u914d\u7684\u503c\u8fdb\u884c\u5220\u9664\u3002\u4f20\u5165\u7684tags\u4e2d\uff0ckey\u4e3atag\u540d\u79f0\uff0cvalue\u4e3atag\u8981\u5339\u914d\u7684\u53d6\u503c\uff0c\u53ef\u4ee5\u4e3astr\u6216\u8005List[str]\u3002 start : \u8d77\u59cb\u65f6\u95f4\u3002\u5982\u679c\u7701\u7565\uff0c\u5219\u4f7f\u7528EPOCH_START. 
precision : \u65f6\u95f4\u7cbe\u5ea6\u3002\u53ef\u4ee5\u4e3a\u201cs\u201d\uff0c\u201cms\u201d\uff0c\u201cus\u201d Returns: \u5220\u9664\u8bed\u53e5 \"\"\" timespec = { \"s\" : \"seconds\" , \"ms\" : \"milliseconds\" , \"us\" : \"microseconds\" } . get ( precision ) if start is None : start = self . EPOCH_START . isoformat ( timespec = timespec ) + \"Z\" predicate = [ f '_measurement=\" { measurement } \"' ] for key , value in tags . items (): if isinstance ( value , list ): predicate . extend ([ f ' { key } = \" { v } \"' for v in value ]) else : predicate . append ( f ' { key } = \" { value } \"' ) command = { \"start\" : start , \"stop\" : f \" { stop . isoformat ( timespec = timespec ) } Z\" , \"predicate\" : \" AND \" . join ( predicate ), } return command","title":"delete()"},{"location":"api/dal/flux/#omicron.dal.influx.flux.Flux.drop","text":"use this to drop columns before return result Parameters: Name Type Description Default cols the name of columns to be dropped required Returns: Type Description Flux Flux object, to support pipe operation Source code in omicron/dal/influx/flux.py def drop ( self , cols : List [ str ]) -> \"Flux\" : \"\"\"use this to drop columns before return result Args: cols : the name of columns to be dropped Returns: Flux object, to support pipe operation \"\"\" if \"drop\" in self . expressions : raise DuplicateOperationError ( \"drop operation has been set already\" ) # add surrounding quotes _cols = [ f '\" { c } \"' for c in cols ] self . expressions [ \"drop\" ] = f \" |> drop(columns: [ { ',' . join ( _cols ) } ])\" return self","title":"drop()"},{"location":"api/dal/flux/#omicron.dal.influx.flux.Flux.drop_sys_cols","text":"use this to drop [\"_start\", \"_stop\", \"_measurement\"], plus columns specified in cols , before return query result please be noticed, after drop sys columns, there's still two sys columns left, which is \"_time\" and \"table\", and \"_time\" should usually be kept, \"table\" is one we're not able to removed. If you don't like _time in return result, you can specify it in cols parameter. Parameters: Name Type Description Default cols the extra columns to be dropped None Returns: Type Description Flux Flux query object Source code in omicron/dal/influx/flux.py def drop_sys_cols ( self , cols : List [ str ] = None ) -> \"Flux\" : \"\"\"use this to drop [\"_start\", \"_stop\", \"_measurement\"], plus columns specified in `cols`, before return query result please be noticed, after drop sys columns, there's still two sys columns left, which is \"_time\" and \"table\", and \"_time\" should usually be kept, \"table\" is one we're not able to removed. If you don't like _time in return result, you can specify it in `cols` parameter. Args: cols : the extra columns to be dropped Returns: Flux query object \"\"\" _cols = [ \"_start\" , \"_stop\" , \"_measurement\" ] if cols is not None : _cols . extend ( cols ) return self . 
drop ( _cols )","title":"drop_sys_cols()"},{"location":"api/dal/flux/#omicron.dal.influx.flux.Flux.fields","text":"\u7ed9\u67e5\u8be2\u6dfb\u52a0field\u8fc7\u6ee4\u6761\u4ef6 \u6b64\u67e5\u8be2\u6761\u4ef6\u4e3a\u8fc7\u6ee4\u6761\u4ef6\uff0c\u7528\u4ee5\u6307\u5b9a\u54ea\u4e9bfield\u4f1a\u51fa\u73b0\u5728\u67e5\u8be2\u7ed3\u679c\u4e2d\uff0c\u5e76\u975e\u5fc5\u987b\u3002\u5982\u679c\u67e5\u8be2\u4e2d\u6ca1\u6709\u6307\u5b9atags\uff0c\u5219\u4f1a\u8fd4\u56de\u6240\u6709\u8bb0\u5f55\u3002 \u7531\u4e8e\u4e00\u6761\u8bb0\u5f55\u53ea\u80fd\u5c5e\u4e8e\u4e00\u4e2a_field\uff0c\u6240\u4ee5\uff0c\u5f53\u6307\u5b9a\u591a\u4e2a_field\u8fdb\u884c\u67e5\u8be2\u65f6\uff0c\u5b83\u4eec\u4e4b\u95f4\u7684\u5173\u7cfb\u5e94\u8be5\u4e3a or \u3002 Exceptions: Type Description DuplicateOperationError \u4e00\u4e2a\u67e5\u8be2\u4e2d\u53ea\u5141\u8bb8\u6267\u884c\u4e00\u6b21\uff0c\u5982\u679cfiled filter\u8868\u8fbe\u5f0f\u5df2\u7ecf\u5b58\u5728\uff0c\u5219\u629b\u51fa\u5f02\u5e38 Parameters: Name Type Description Default fields List \u5f85\u67e5\u8be2\u7684field\u5217\u8868 required reserve_time_stamp bool \u662f\u5426\u4fdd\u7559\u65f6\u95f4\u6233 _time \uff0c\u9ed8\u8ba4\u4e3aTrue True Returns: Type Description Flux Flux\u5bf9\u8c61\uff0c\u4ee5\u4fbf\u8fdb\u884c\u7ba1\u9053\u64cd\u4f5c Source code in omicron/dal/influx/flux.py def fields ( self , fields : List , reserve_time_stamp : bool = True ) -> \"Flux\" : \"\"\"\u7ed9\u67e5\u8be2\u6dfb\u52a0field\u8fc7\u6ee4\u6761\u4ef6 \u6b64\u67e5\u8be2\u6761\u4ef6\u4e3a\u8fc7\u6ee4\u6761\u4ef6\uff0c\u7528\u4ee5\u6307\u5b9a\u54ea\u4e9bfield\u4f1a\u51fa\u73b0\u5728\u67e5\u8be2\u7ed3\u679c\u4e2d\uff0c\u5e76\u975e\u5fc5\u987b\u3002\u5982\u679c\u67e5\u8be2\u4e2d\u6ca1\u6709\u6307\u5b9atags\uff0c\u5219\u4f1a\u8fd4\u56de\u6240\u6709\u8bb0\u5f55\u3002 \u7531\u4e8e\u4e00\u6761\u8bb0\u5f55\u53ea\u80fd\u5c5e\u4e8e\u4e00\u4e2a_field\uff0c\u6240\u4ee5\uff0c\u5f53\u6307\u5b9a\u591a\u4e2a_field\u8fdb\u884c\u67e5\u8be2\u65f6\uff0c\u5b83\u4eec\u4e4b\u95f4\u7684\u5173\u7cfb\u5e94\u8be5\u4e3a`or`\u3002 Raises: DuplicateOperationError: \u4e00\u4e2a\u67e5\u8be2\u4e2d\u53ea\u5141\u8bb8\u6267\u884c\u4e00\u6b21\uff0c\u5982\u679cfiled filter\u8868\u8fbe\u5f0f\u5df2\u7ecf\u5b58\u5728\uff0c\u5219\u629b\u51fa\u5f02\u5e38 Args: fields: \u5f85\u67e5\u8be2\u7684field\u5217\u8868 reserve_time_stamp: \u662f\u5426\u4fdd\u7559\u65f6\u95f4\u6233`_time`\uff0c\u9ed8\u8ba4\u4e3aTrue Returns: Flux\u5bf9\u8c61\uff0c\u4ee5\u4fbf\u8fdb\u884c\u7ba1\u9053\u64cd\u4f5c \"\"\" if \"fields\" in self . expressions : raise DuplicateOperationError ( \"fields has been set\" ) self . _cols = fields . copy () if reserve_time_stamp and \"_time\" not in self . _cols : self . _cols . append ( \"_time\" ) self . _cols = sorted ( self . _cols ) filters = [ f 'r[\"_field\"] == \" { name } \"' for name in self . _cols ] self . expressions [ \"fields\" ] = f \" |> filter(fn: (r) => { ' or ' . 
join ( filters ) } )\" return self","title":"fields()"},{"location":"api/dal/flux/#omicron.dal.influx.flux.Flux.format_time","text":"\u5c06\u65f6\u95f4\u8f6c\u6362\u6210\u5ba2\u6237\u7aef\u5bf9\u5e94\u7684\u7cbe\u5ea6\uff0c\u5e76\u4ee5 RFC3339 timestamps\u683c\u5f0f\u4e32\uff08\u5373influxdb\u8981\u6c42\u7684\u683c\u5f0f\uff09\u8fd4\u56de\u3002 \u5982\u679c\u8fd9\u4e2a\u65f6\u95f4\u662f\u4f5c\u4e3a\u67e5\u8be2\u7684range\u4e2d\u7684\u7ed3\u675f\u65f6\u95f4\u4f7f\u7528\u65f6\uff0c\u7531\u4e8einflux\u67e5\u8be2\u7684\u65f6\u95f4\u8303\u56f4\u662f\u5de6\u95ed\u53f3\u5f00\u7684\uff0c\u56e0\u6b64\u5982\u679c\u4f60\u9700\u8981\u67e5\u8be2\u7684\u662f\u4e00\u4e2a\u95ed\u533a\u95f4\uff0c\u5219\u9700\u8981\u5c06 end \u7684\u65f6\u95f4\u5411\u524d\u504f\u79fb\u4e00\u4e2a\u7cbe\u5ea6\u3002\u901a\u8fc7\u4f20\u5165 shift_forward = True \u53ef\u4ee5\u5b8c\u6210\u8fd9\u79cd\u8f6c\u6362\u3002 Examples: >>> # by default, the precision is seconds, and convert a date >>> Flux . format_time ( datetime . date ( 2019 , 1 , 1 )) '2019-01-01T00:00:00Z' >>> # set precision to ms, convert a time >>> Flux . format_time ( datetime . datetime ( 1978 , 7 , 8 , 12 , 34 , 56 , 123456 ), precision = \"ms\" ) '1978-07-08T12:34:56.123Z' >>> # convert and forward shift >>> Flux . format_time ( datetime . date ( 1978 , 7 , 8 ), shift_forward = True ) '1978-07-08T00:00:01Z' Parameters: Name Type Description Default tm \u5f85\u683c\u5f0f\u5316\u7684\u65f6\u95f4 required precision str \u65f6\u95f4\u7cbe\u5ea6\uff0c\u53ef\u9009\u503c\u4e3a\uff1a's', 'ms', 'us' 's' shift_forward \u5982\u679c\u4e3aTrue\uff0c\u5219\u5c06end\u5411\u524d\u504f\u79fb\u4e00\u4e2a\u7cbe\u5ea6 False Returns: Type Description str \u8c03\u6574\u540e\u7b26\u5408influx\u65f6\u95f4\u89c4\u8303\u7684\u65f6\u95f4\uff08\u5b57\u7b26\u4e32\u8868\u793a\uff09 Source code in omicron/dal/influx/flux.py @classmethod def format_time ( cls , tm : Frame , precision : str = \"s\" , shift_forward = False ) -> str : \"\"\"\u5c06\u65f6\u95f4\u8f6c\u6362\u6210\u5ba2\u6237\u7aef\u5bf9\u5e94\u7684\u7cbe\u5ea6\uff0c\u5e76\u4ee5 RFC3339 timestamps\u683c\u5f0f\u4e32\uff08\u5373influxdb\u8981\u6c42\u7684\u683c\u5f0f\uff09\u8fd4\u56de\u3002 \u5982\u679c\u8fd9\u4e2a\u65f6\u95f4\u662f\u4f5c\u4e3a\u67e5\u8be2\u7684range\u4e2d\u7684\u7ed3\u675f\u65f6\u95f4\u4f7f\u7528\u65f6\uff0c\u7531\u4e8einflux\u67e5\u8be2\u7684\u65f6\u95f4\u8303\u56f4\u662f\u5de6\u95ed\u53f3\u5f00\u7684\uff0c\u56e0\u6b64\u5982\u679c\u4f60\u9700\u8981\u67e5\u8be2\u7684\u662f\u4e00\u4e2a\u95ed\u533a\u95f4\uff0c\u5219\u9700\u8981\u5c06`end`\u7684\u65f6\u95f4\u5411\u524d\u504f\u79fb\u4e00\u4e2a\u7cbe\u5ea6\u3002\u901a\u8fc7\u4f20\u5165`shift_forward = True`\u53ef\u4ee5\u5b8c\u6210\u8fd9\u79cd\u8f6c\u6362\u3002 Examples: >>> # by default, the precision is seconds, and convert a date >>> Flux.format_time(datetime.date(2019, 1, 1)) '2019-01-01T00:00:00Z' >>> # set precision to ms, convert a time >>> Flux.format_time(datetime.datetime(1978, 7, 8, 12, 34, 56, 123456), precision=\"ms\") '1978-07-08T12:34:56.123Z' >>> # convert and forward shift >>> Flux.format_time(datetime.date(1978, 7, 8), shift_forward = True) '1978-07-08T00:00:01Z' Args: tm : \u5f85\u683c\u5f0f\u5316\u7684\u65f6\u95f4 precision: \u65f6\u95f4\u7cbe\u5ea6\uff0c\u53ef\u9009\u503c\u4e3a\uff1a's', 'ms', 'us' shift_forward: \u5982\u679c\u4e3aTrue\uff0c\u5219\u5c06end\u5411\u524d\u504f\u79fb\u4e00\u4e2a\u7cbe\u5ea6 Returns: \u8c03\u6574\u540e\u7b26\u5408influx\u65f6\u95f4\u89c4\u8303\u7684\u65f6\u95f4\uff08\u5b57\u7b26\u4e32\u8868\u793a\uff09 \"\"\" timespec = { 
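A short usage sketch of the field filter described above (field names hypothetical, import path assumed as before). The expected expression is derived from the source listing, which or-joins the sorted field names and keeps `_time` by default.

```python
from omicron.dal.influx.flux import Flux  # assumed import path

flux = Flux().fields(["close", "open"])   # "_time" is kept because reserve_time_stamp defaults to True
print(flux.expressions["fields"])
# expected to contain an or-joined filter such as
#   |> filter(fn: (r) => r["_field"] == "_time" or r["_field"] == "close" or r["_field"] == "open")

# a second call to .fields() on the same Flux object raises DuplicateOperationError
```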
\"s\" : \"seconds\" , \"ms\" : \"milliseconds\" , \"us\" : \"microseconds\" } . get ( precision ) if timespec is None : raise ValueError ( f \"precision must be one of 's', 'ms', 'us', but got { precision } \" ) tm = arrow . get ( tm ) . naive if shift_forward : tm = tm + datetime . timedelta ( ** { timespec : 1 }) return tm . isoformat ( sep = \"T\" , timespec = timespec ) + \"Z\"","title":"format_time()"},{"location":"api/dal/flux/#omicron.dal.influx.flux.Flux.group","text":"[summary] Returns: Type Description Flux [description] Source code in omicron/dal/influx/flux.py def group ( self , by : Tuple [ str ]) -> \"Flux\" : \"\"\"[summary] Returns: [description] \"\"\" if \"group\" in self . expressions : raise DuplicateOperationError ( \"group has been set\" ) if isinstance ( by , str ): by = [ by ] cols = \",\" . join ([ f '\" { col } \"' for col in by ]) self . expressions [ \"group\" ] = f \" |> group(columns: [ { cols } ])\" return self","title":"group()"},{"location":"api/dal/flux/#omicron.dal.influx.flux.Flux.latest","text":"\u83b7\u53d6\u6700\u540en\u6761\u6570\u636e\uff0c\u6309\u65f6\u95f4\u589e\u5e8f\u8fd4\u56de Flux\u67e5\u8be2\u7684\u589e\u5f3a\u529f\u80fd\uff0c\u76f8\u5f53\u4e8etop + sort + limit Parameters: Name Type Description Default n int \u6700\u540en\u6761\u6570\u636e required Returns: Type Description Flux Flux\u5bf9\u8c61\uff0c\u4ee5\u4fbf\u8fdb\u884c\u7ba1\u9053\u64cd\u4f5c Source code in omicron/dal/influx/flux.py def latest ( self , n : int ) -> \"Flux\" : \"\"\"\u83b7\u53d6\u6700\u540en\u6761\u6570\u636e\uff0c\u6309\u65f6\u95f4\u589e\u5e8f\u8fd4\u56de Flux\u67e5\u8be2\u7684\u589e\u5f3a\u529f\u80fd\uff0c\u76f8\u5f53\u4e8etop + sort + limit Args: n: \u6700\u540en\u6761\u6570\u636e Returns: Flux\u5bf9\u8c61\uff0c\u4ee5\u4fbf\u8fdb\u884c\u7ba1\u9053\u64cd\u4f5c \"\"\" assert \"top\" not in self . expressions , \"top and last_n can not be used together\" assert ( \"sort\" not in self . expressions ), \"sort and last_n can not be used together\" assert ( \"limit\" not in self . expressions ), \"limit and last_n can not be used together\" self . _last_n = n return self","title":"latest()"},{"location":"api/dal/flux/#omicron.dal.influx.flux.Flux.limit","text":"\u6dfb\u52a0\u8fd4\u56de\u8bb0\u5f55\u6570\u9650\u5236 Exceptions: Type Description DuplicateOperationError \u4e00\u4e2a\u67e5\u8be2\u4e2d\u53ea\u5141\u8bb8\u6307\u5b9a\u4e00\u6b21limit\uff0c\u5982\u679climit\u8868\u8fbe\u5f0f\u5df2\u7ecf\u5b58\u5728\uff0c\u5219\u629b\u51fa\u5f02\u5e38 Parameters: Name Type Description Default limit int \u8fd4\u56de\u8bb0\u5f55\u6570\u9650\u5236 required Returns: Type Description Flux Flux\u5bf9\u8c61\uff0c\u4ee5\u4fbf\u8fdb\u884c\u7ba1\u9053\u64cd\u4f5c Source code in omicron/dal/influx/flux.py def limit ( self , limit : int ) -> \"Flux\" : \"\"\"\u6dfb\u52a0\u8fd4\u56de\u8bb0\u5f55\u6570\u9650\u5236 Raises: DuplicateOperationError: \u4e00\u4e2a\u67e5\u8be2\u4e2d\u53ea\u5141\u8bb8\u6307\u5b9a\u4e00\u6b21limit\uff0c\u5982\u679climit\u8868\u8fbe\u5f0f\u5df2\u7ecf\u5b58\u5728\uff0c\u5219\u629b\u51fa\u5f02\u5e38 Args: limit: \u8fd4\u56de\u8bb0\u5f55\u6570\u9650\u5236 Returns: Flux\u5bf9\u8c61\uff0c\u4ee5\u4fbf\u8fdb\u884c\u7ba1\u9053\u64cd\u4f5c \"\"\" if \"limit\" in self . expressions : raise DuplicateOperationError ( \"limit has been set\" ) self . 
expressions [ \"limit\" ] = \" |> limit(n: %d )\" % limit return self","title":"limit()"},{"location":"api/dal/flux/#omicron.dal.influx.flux.Flux.measurement","text":"add measurement filter to query Exceptions: Type Description DuplicateOperationError \u4e00\u6b21\u67e5\u8be2\u4e2d\u53ea\u5141\u8bb8\u6307\u5b9a\u4e00\u4e2ameasurement, \u5982\u679c\u8868\u8fbe\u5f0f\u4e2d\u5df2\u7ecf\u5b58\u5728measurement, \u5219\u629b\u51fa\u5f02\u5e38 Returns: Type Description Flux Flux\u5bf9\u8c61\u81ea\u8eab\uff0c\u4ee5\u4fbf\u8fdb\u884c\u7ba1\u9053\u64cd\u4f5c Source code in omicron/dal/influx/flux.py def measurement ( self , measurement : str ) -> \"Flux\" : \"\"\"add measurement filter to query Raises: DuplicateOperationError: \u4e00\u6b21\u67e5\u8be2\u4e2d\u53ea\u5141\u8bb8\u6307\u5b9a\u4e00\u4e2ameasurement, \u5982\u679c\u8868\u8fbe\u5f0f\u4e2d\u5df2\u7ecf\u5b58\u5728measurement, \u5219\u629b\u51fa\u5f02\u5e38 Returns: Flux\u5bf9\u8c61\u81ea\u8eab\uff0c\u4ee5\u4fbf\u8fdb\u884c\u7ba1\u9053\u64cd\u4f5c \"\"\" if \"measurement\" in self . expressions : raise DuplicateOperationError ( \"measurement has been set\" ) self . expressions [ \"measurement\" ] = f ' |> filter(fn: (r) => r[\"_measurement\"] == \" { measurement } \")' return self","title":"measurement()"},{"location":"api/dal/flux/#omicron.dal.influx.flux.Flux.pivot","text":"pivot\u7528\u6765\u5c06\u4ee5\u5217\u4e3a\u5355\u4f4d\u7684\u6570\u636e\u8f6c\u6362\u4e3a\u4ee5\u884c\u4e3a\u5355\u4f4d\u7684\u6570\u636e Flux\u67e5\u8be2\u8fd4\u56de\u7684\u7ed3\u679c\u901a\u5e38\u90fd\u662f\u4ee5\u5217\u4e3a\u5355\u4f4d\u7684\u6570\u636e\uff0c\u589e\u52a0\u672cpivot\u6761\u4ef6\u540e\uff0c\u7ed3\u679c\u5c06\u88ab\u8f6c\u6362\u6210\u4e3a\u4ee5\u884c\u4e3a\u5355\u4f4d\u7684\u6570\u636e\u518d\u8fd4\u56de\u3002 \u8fd9\u91cc\u5b9e\u73b0\u7684\u662fmeasurement\u5185\u7684\u8f6c\u6362\uff0c\u8bf7\u53c2\u8003 pivot Parameters: Name Type Description Default row_keys List[str] \u60df\u4e00\u786e\u5b9a\u8f93\u51fa\u4e2d\u4e00\u884c\u6570\u636e\u7684\u5217\u540d\u5b57, \u9ed8\u8ba4\u4e3a[\"_time\"] ['_time'] column_keys \u5217\u540d\u79f0\u5217\u8868\uff0c\u9ed8\u8ba4\u4e3a[\"_field\"] ['_field'] value_column str \u503c\u5217\u540d\uff0c\u9ed8\u8ba4\u4e3a\"_value\" '_value' Returns: Type Description Flux Flux\u5bf9\u8c61\uff0c\u4ee5\u4fbf\u8fdb\u884c\u7ba1\u9053\u64cd\u4f5c Source code in omicron/dal/influx/flux.py def pivot ( self , row_keys : List [ str ] = [ \"_time\" ], column_keys = [ \"_field\" ], value_column : str = \"_value\" , ) -> \"Flux\" : \"\"\"pivot\u7528\u6765\u5c06\u4ee5\u5217\u4e3a\u5355\u4f4d\u7684\u6570\u636e\u8f6c\u6362\u4e3a\u4ee5\u884c\u4e3a\u5355\u4f4d\u7684\u6570\u636e Flux\u67e5\u8be2\u8fd4\u56de\u7684\u7ed3\u679c\u901a\u5e38\u90fd\u662f\u4ee5\u5217\u4e3a\u5355\u4f4d\u7684\u6570\u636e\uff0c\u589e\u52a0\u672cpivot\u6761\u4ef6\u540e\uff0c\u7ed3\u679c\u5c06\u88ab\u8f6c\u6362\u6210\u4e3a\u4ee5\u884c\u4e3a\u5355\u4f4d\u7684\u6570\u636e\u518d\u8fd4\u56de\u3002 \u8fd9\u91cc\u5b9e\u73b0\u7684\u662fmeasurement\u5185\u7684\u8f6c\u6362\uff0c\u8bf7\u53c2\u8003 [pivot](https://docs.influxdata.com/flux/v0.x/stdlib/universe/pivot/#align-fields-within-each-measurement-that-have-the-same-timestamp) Args: row_keys: \u60df\u4e00\u786e\u5b9a\u8f93\u51fa\u4e2d\u4e00\u884c\u6570\u636e\u7684\u5217\u540d\u5b57, \u9ed8\u8ba4\u4e3a[\"_time\"] column_keys: \u5217\u540d\u79f0\u5217\u8868\uff0c\u9ed8\u8ba4\u4e3a[\"_field\"] value_column: \u503c\u5217\u540d\uff0c\u9ed8\u8ba4\u4e3a\"_value\" Returns: Flux\u5bf9\u8c61\uff0c\u4ee5\u4fbf\u8fdb\u884c\u7ba1\u9053\u64cd\u4f5c 
\"\"\" if \"pivot\" in self . expressions : raise DuplicateOperationError ( \"pivot has been set\" ) columns = \",\" . join ([ f '\" { name } \"' for name in column_keys ]) rowkeys = \",\" . join ([ f '\" { name } \"' for name in row_keys ]) self . expressions [ \"pivot\" ] = f ' |> pivot(columnKey: [ { columns } ], rowKey: [ { rowkeys } ], valueColumn: \" { value_column } \")' return self","title":"pivot()"},{"location":"api/dal/flux/#omicron.dal.influx.flux.Flux.range","text":"\u6dfb\u52a0\u65f6\u95f4\u8303\u56f4\u8fc7\u6ee4 \u5fc5\u987b\u6307\u5b9a\u7684\u67e5\u8be2\u6761\u4ef6\uff0c\u5426\u5219influxdb\u4f1a\u62a5unbound\u67e5\u8be2\u9519\uff0c\u56e0\u4e3a\u8fd9\u79cd\u60c5\u51b5\u4e0b\uff0c\u8fd4\u56de\u7684\u6570\u636e\u91cf\u5c06\u975e\u5e38\u5927\u3002 \u5728\u683c\u5f0f\u5316\u65f6\u95f4\u65f6\uff0c\u9700\u8981\u6839\u636e precision \u751f\u6210\u65f6\u95f4\u5b57\u7b26\u4e32\u3002\u5728\u5411Influxdb\u53d1\u9001\u8bf7\u6c42\u65f6\uff0c\u5e94\u8be5\u6ce8\u610f\u67e5\u8be2\u53c2\u6570\u4e2d\u6307\u5b9a\u7684\u65f6\u95f4\u7cbe\u5ea6\u4e0e\u8fd9\u91cc\u4f7f\u7528\u7684\u4fdd\u6301\u4e00\u81f4\u3002 Influxdb\u7684\u67e5\u8be2\u7ed3\u679c\u9ed8\u8ba4\u4e0d\u5305\u542b\u7ed3\u675f\u65f6\u95f4\uff0c\u5f53 right_close \u6307\u5b9a\u4e3aTrue\u65f6\uff0c\u6211\u4eec\u5c06\u6839\u636e\u6307\u5b9a\u7684\u7cbe\u5ea6\u4fee\u6539 end \u65f6\u95f4\uff0c\u4f7f\u4e4b\u4ec5\u6bd4 end \u591a\u4e00\u4e2a\u65f6\u95f4\u5355\u4f4d\uff0c\u4ece\u800c\u4fdd\u8bc1\u67e5\u8be2\u7ed3\u679c\u4f1a\u5305\u542b end \u3002 Exceptions: Type Description DuplicateOperationError \u4e00\u4e2a\u67e5\u8be2\u4e2d\u53ea\u5141\u8bb8\u6307\u5b9a\u4e00\u6b21\u65f6\u95f4\u8303\u56f4\uff0c\u5982\u679crange\u8868\u8fbe\u5f0f\u5df2\u7ecf\u5b58\u5728\uff0c\u5219\u629b\u51fa\u5f02\u5e38 Parameters: Name Type Description Default start Union[datetime.date, datetime.datetime] \u5f00\u59cb\u65f6\u95f4 required end Union[datetime.date, datetime.datetime] \u7ed3\u675f\u65f6\u95f4 required right_close \u67e5\u8be2\u7ed3\u679c\u662f\u5426\u5305\u542b\u7ed3\u675f\u65f6\u95f4\u3002 True precision \u65f6\u95f4\u7cbe\u5ea6\uff0c\u9ed8\u8ba4\u4e3a\u79d2\u3002 's' Returns: Type Description Flux Flux\u5bf9\u8c61\uff0c\u4ee5\u652f\u6301\u7ba1\u9053\u64cd\u4f5c Source code in omicron/dal/influx/flux.py def range ( self , start : Frame , end : Frame , right_close = True , precision = \"s\" ) -> \"Flux\" : \"\"\"\u6dfb\u52a0\u65f6\u95f4\u8303\u56f4\u8fc7\u6ee4 \u5fc5\u987b\u6307\u5b9a\u7684\u67e5\u8be2\u6761\u4ef6\uff0c\u5426\u5219influxdb\u4f1a\u62a5unbound\u67e5\u8be2\u9519\uff0c\u56e0\u4e3a\u8fd9\u79cd\u60c5\u51b5\u4e0b\uff0c\u8fd4\u56de\u7684\u6570\u636e\u91cf\u5c06\u975e\u5e38\u5927\u3002 \u5728\u683c\u5f0f\u5316\u65f6\u95f4\u65f6\uff0c\u9700\u8981\u6839\u636e`precision`\u751f\u6210\u65f6\u95f4\u5b57\u7b26\u4e32\u3002\u5728\u5411Influxdb\u53d1\u9001\u8bf7\u6c42\u65f6\uff0c\u5e94\u8be5\u6ce8\u610f\u67e5\u8be2\u53c2\u6570\u4e2d\u6307\u5b9a\u7684\u65f6\u95f4\u7cbe\u5ea6\u4e0e\u8fd9\u91cc\u4f7f\u7528\u7684\u4fdd\u6301\u4e00\u81f4\u3002 Influxdb\u7684\u67e5\u8be2\u7ed3\u679c\u9ed8\u8ba4\u4e0d\u5305\u542b\u7ed3\u675f\u65f6\u95f4\uff0c\u5f53`right_close`\u6307\u5b9a\u4e3aTrue\u65f6\uff0c\u6211\u4eec\u5c06\u6839\u636e\u6307\u5b9a\u7684\u7cbe\u5ea6\u4fee\u6539`end`\u65f6\u95f4\uff0c\u4f7f\u4e4b\u4ec5\u6bd4`end`\u591a\u4e00\u4e2a\u65f6\u95f4\u5355\u4f4d\uff0c\u4ece\u800c\u4fdd\u8bc1\u67e5\u8be2\u7ed3\u679c\u4f1a\u5305\u542b`end`\u3002 Raises: DuplicateOperationError: 
\u4e00\u4e2a\u67e5\u8be2\u4e2d\u53ea\u5141\u8bb8\u6307\u5b9a\u4e00\u6b21\u65f6\u95f4\u8303\u56f4\uff0c\u5982\u679crange\u8868\u8fbe\u5f0f\u5df2\u7ecf\u5b58\u5728\uff0c\u5219\u629b\u51fa\u5f02\u5e38 Args: start: \u5f00\u59cb\u65f6\u95f4 end: \u7ed3\u675f\u65f6\u95f4 right_close: \u67e5\u8be2\u7ed3\u679c\u662f\u5426\u5305\u542b\u7ed3\u675f\u65f6\u95f4\u3002 precision: \u65f6\u95f4\u7cbe\u5ea6\uff0c\u9ed8\u8ba4\u4e3a\u79d2\u3002 Returns: Flux\u5bf9\u8c61\uff0c\u4ee5\u652f\u6301\u7ba1\u9053\u64cd\u4f5c \"\"\" if \"range\" in self . expressions : raise DuplicateOperationError ( \"range has been set\" ) if precision not in [ \"s\" , \"ms\" , \"us\" ]: raise AssertionError ( \"precision must be 's', 'ms' or 'us'\" ) end = self . format_time ( end , precision , right_close ) start = self . format_time ( start , precision ) self . expressions [ \"range\" ] = f \" |> range(start: { start } , stop: { end } )\" return self","title":"range()"},{"location":"api/dal/flux/#omicron.dal.influx.flux.Flux.sort","text":"\u6309\u7167\u6307\u5b9a\u7684\u5217\u8fdb\u884c\u6392\u5e8f \u6839\u636e influxdb doc , \u67e5\u8be2\u8fd4\u56de\u503c\u9ed8\u8ba4\u5730\u6309\u65f6\u95f4\u6392\u5e8f\u3002\u56e0\u6b64\uff0c\u5982\u679c\u4ec5\u4ec5\u662f\u8981\u6c42\u67e5\u8be2\u7ed3\u679c\u6309\u65f6\u95f4\u6392\u5e8f\uff0c\u65e0\u987b\u8c03\u7528\u6b64API\uff0c\u4f46\u662f\uff0c\u6b64API\u63d0\u4f9b\u4e86\u6309\u5176\u5b83\u5b57\u6bb5\u6392\u5e8f\u7684\u80fd\u529b\u3002 \u53e6\u5916\uff0c\u5728\u4e00\u4e2a\u67095000\u591a\u4e2atag\uff0c\u5171\u8fd4\u56de1M\u6761\u8bb0\u5f55\u7684\u6d4b\u8bd5\u4e2d\uff0c\u6d4b\u8bd5\u9a8c\u8bc1\u8fd4\u56de\u8bb0\u5f55\u786e\u5b9e\u6309_time\u5347\u5e8f\u6392\u5217\u3002 Parameters: Name Type Description Default by List[str] \u6307\u5b9a\u6392\u5e8f\u7684\u5217\u540d\u79f0\u5217\u8868 None Returns: Type Description Flux Flux\u5bf9\u8c61\uff0c\u4ee5\u4fbf\u8fdb\u884c\u7ba1\u9053\u64cd\u4f5c Source code in omicron/dal/influx/flux.py def sort ( self , by : List [ str ] = None , desc : bool = False ) -> \"Flux\" : \"\"\"\u6309\u7167\u6307\u5b9a\u7684\u5217\u8fdb\u884c\u6392\u5e8f \u6839\u636e[influxdb doc](https://docs.influxdata.com/influxdb/v2.0/query-data/flux/first-last/), \u67e5\u8be2\u8fd4\u56de\u503c\u9ed8\u8ba4\u5730\u6309\u65f6\u95f4\u6392\u5e8f\u3002\u56e0\u6b64\uff0c\u5982\u679c\u4ec5\u4ec5\u662f\u8981\u6c42\u67e5\u8be2\u7ed3\u679c\u6309\u65f6\u95f4\u6392\u5e8f\uff0c\u65e0\u987b\u8c03\u7528\u6b64API\uff0c\u4f46\u662f\uff0c\u6b64API\u63d0\u4f9b\u4e86\u6309\u5176\u5b83\u5b57\u6bb5\u6392\u5e8f\u7684\u80fd\u529b\u3002 \u53e6\u5916\uff0c\u5728\u4e00\u4e2a\u67095000\u591a\u4e2atag\uff0c\u5171\u8fd4\u56de1M\u6761\u8bb0\u5f55\u7684\u6d4b\u8bd5\u4e2d\uff0c\u6d4b\u8bd5\u9a8c\u8bc1\u8fd4\u56de\u8bb0\u5f55\u786e\u5b9e\u6309_time\u5347\u5e8f\u6392\u5217\u3002 Args: by: \u6307\u5b9a\u6392\u5e8f\u7684\u5217\u540d\u79f0\u5217\u8868 Returns: Flux\u5bf9\u8c61\uff0c\u4ee5\u4fbf\u8fdb\u884c\u7ba1\u9053\u64cd\u4f5c \"\"\" if \"sort\" in self . expressions : raise DuplicateOperationError ( \"sort has been set\" ) if by is None : by = [ \"_value\" ] if isinstance ( by , str ): by = [ by ] columns_ = \",\" . join ([ f '\" { name } \"' for name in by ]) desc = \"true\" if desc else \"false\" self . 
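A sketch of range() and sort() based on the signatures above: a closed interval at millisecond precision, sorted by value instead of the default time ordering. Times and column names are illustrative only.

```python
import datetime

from omicron.dal.influx.flux import Flux  # assumed import path

flux = (
    Flux()
    .range(
        datetime.datetime(2022, 1, 1),
        datetime.datetime(2022, 1, 31, 15),
        right_close=True,  # shift the stop time forward by one unit so it is included
        precision="ms",    # keep this consistent with the precision used in the request
    )
    .sort(by=["_value"], desc=True)  # results are already time-sorted; sort by value here
)
```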
expressions [ \"sort\" ] = f \" |> sort(columns: [ { columns_ } ], desc: { desc } )\" return self","title":"sort()"},{"location":"api/dal/flux/#omicron.dal.influx.flux.Flux.tags","text":"\u7ed9\u67e5\u8be2\u6dfb\u52a0tags\u8fc7\u6ee4\u6761\u4ef6 \u6b64\u67e5\u8be2\u6761\u4ef6\u4e3a\u8fc7\u6ee4\u6761\u4ef6\uff0c\u5e76\u975e\u5fc5\u987b\u3002\u5982\u679c\u67e5\u8be2\u4e2d\u6ca1\u6709\u6307\u5b9atags\uff0c\u5219\u4f1a\u8fd4\u56de\u6240\u6709\u8bb0\u5f55\u3002 \u5728\u5b9e\u73b0\u4e0a\uff0c\u65e2\u53ef\u4ee5\u4f7f\u7528 contains \u8bed\u6cd5\uff0c\u4e5f\u53ef\u4ee5\u4f7f\u7528 or \u8bed\u6cd5(\u7531\u4e8e\u4e00\u6761\u8bb0\u5f55\u53ea\u80fd\u5c5e\u4e8e\u4e00\u4e2atag\uff0c\u6240\u4ee5\uff0c\u5f53\u6307\u5b9a\u591a\u4e2atag\u8fdb\u884c\u67e5\u8be2\u65f6\uff0c\u5b83\u4eec\u4e4b\u95f4\u7684\u5173\u7cfb\u5e94\u8be5\u4e3a or )\u3002\u7ecf\u9a8c\u8bc1\uff0ccontains\u8bed\u6cd5\u4f1a\u59cb\u7ec8\u5148\u5c06\u6240\u6709\u7b26\u5408\u6761\u4ef6\u7684\u8bb0\u5f55\u68c0\u7d22\u51fa\u6765\uff0c\u518d\u8fdb\u884c\u8fc7\u6ee4\u3002\u8fd9\u6837\u7684\u6548\u7387\u6bd4\u8f83\u4f4e\uff0c\u7279\u522b\u662f\u5f53tags\u7684\u6570\u91cf\u8f83\u5c11\u65f6\uff0c\u4f1a\u8fdc\u8fdc\u6bd4\u4f7f\u7528or\u8bed\u6cd5\u6162\u3002 Exceptions: Type Description DuplicateOperationError \u4e00\u4e2a\u67e5\u8be2\u4e2d\u53ea\u5141\u8bb8\u6267\u884c\u4e00\u6b21\uff0c\u5982\u679ctag filter\u8868\u8fbe\u5f0f\u5df2\u7ecf\u5b58\u5728\uff0c\u5219\u629b\u51fa\u5f02\u5e38 Parameters: Name Type Description Default tags tags\u662f\u4e00\u4e2a{tagname: Union[str,[tag_values]]}\u5bf9\u8c61\u3002 required Examples: >>> flux = Flux () >>> flux . tags ({ \"code\" : [ \"000001\" , \"000002\" ], \"name\" : [ \"\u6d66\u53d1\u94f6\u884c\" ]}) . expressions [ \"tags\" ] ' |> filter(fn: (r) => r[\"code\"] == \"000001\" or r[\"code\"] == \"000002\" or r[\"name\"] == \"\u6d66\u53d1\u94f6\u884c\")' Returns: Type Description Flux Flux\u5bf9\u8c61\uff0c\u4ee5\u4fbf\u8fdb\u884c\u7ba1\u9053\u64cd\u4f5c Source code in omicron/dal/influx/flux.py def tags ( self , tags : DefaultDict [ str , List [ str ]]) -> \"Flux\" : \"\"\"\u7ed9\u67e5\u8be2\u6dfb\u52a0tags\u8fc7\u6ee4\u6761\u4ef6 \u6b64\u67e5\u8be2\u6761\u4ef6\u4e3a\u8fc7\u6ee4\u6761\u4ef6\uff0c\u5e76\u975e\u5fc5\u987b\u3002\u5982\u679c\u67e5\u8be2\u4e2d\u6ca1\u6709\u6307\u5b9atags\uff0c\u5219\u4f1a\u8fd4\u56de\u6240\u6709\u8bb0\u5f55\u3002 \u5728\u5b9e\u73b0\u4e0a\uff0c\u65e2\u53ef\u4ee5\u4f7f\u7528`contains`\u8bed\u6cd5\uff0c\u4e5f\u53ef\u4ee5\u4f7f\u7528`or`\u8bed\u6cd5(\u7531\u4e8e\u4e00\u6761\u8bb0\u5f55\u53ea\u80fd\u5c5e\u4e8e\u4e00\u4e2atag\uff0c\u6240\u4ee5\uff0c\u5f53\u6307\u5b9a\u591a\u4e2atag\u8fdb\u884c\u67e5\u8be2\u65f6\uff0c\u5b83\u4eec\u4e4b\u95f4\u7684\u5173\u7cfb\u5e94\u8be5\u4e3a`or`)\u3002\u7ecf\u9a8c\u8bc1\uff0ccontains\u8bed\u6cd5\u4f1a\u59cb\u7ec8\u5148\u5c06\u6240\u6709\u7b26\u5408\u6761\u4ef6\u7684\u8bb0\u5f55\u68c0\u7d22\u51fa\u6765\uff0c\u518d\u8fdb\u884c\u8fc7\u6ee4\u3002\u8fd9\u6837\u7684\u6548\u7387\u6bd4\u8f83\u4f4e\uff0c\u7279\u522b\u662f\u5f53tags\u7684\u6570\u91cf\u8f83\u5c11\u65f6\uff0c\u4f1a\u8fdc\u8fdc\u6bd4\u4f7f\u7528or\u8bed\u6cd5\u6162\u3002 Raises: DuplicateOperationError: \u4e00\u4e2a\u67e5\u8be2\u4e2d\u53ea\u5141\u8bb8\u6267\u884c\u4e00\u6b21\uff0c\u5982\u679ctag filter\u8868\u8fbe\u5f0f\u5df2\u7ecf\u5b58\u5728\uff0c\u5219\u629b\u51fa\u5f02\u5e38 Args: tags : tags\u662f\u4e00\u4e2a{tagname: Union[str,[tag_values]]}\u5bf9\u8c61\u3002 Examples: >>> flux = Flux() >>> flux.tags({\"code\": [\"000001\", \"000002\"], \"name\": 
[\"\u6d66\u53d1\u94f6\u884c\"]}).expressions[\"tags\"] ' |> filter(fn: (r) => r[\"code\"] == \"000001\" or r[\"code\"] == \"000002\" or r[\"name\"] == \"\u6d66\u53d1\u94f6\u884c\")' Returns: Flux\u5bf9\u8c61\uff0c\u4ee5\u4fbf\u8fdb\u884c\u7ba1\u9053\u64cd\u4f5c \"\"\" if \"tags\" in self . expressions : raise DuplicateOperationError ( \"tags has been set\" ) filters = [] for tag , values in tags . items (): assert ( isinstance ( values , str ) or len ( values ) > 0 ), f \"tag { tag } should not be empty or None\" if isinstance ( values , str ): values = [ values ] for v in values : filters . append ( f 'r[\" { tag } \"] == \" { v } \"' ) op_expression = \" or \" . join ( filters ) self . expressions [ \"tags\" ] = f \" |> filter(fn: (r) => { op_expression } )\" return self","title":"tags()"},{"location":"api/dal/flux/#omicron.dal.influx.flux.Flux.to_timestamp","text":"\u5c06\u65f6\u95f4\u6839\u636e\u7cbe\u5ea6\u8f6c\u6362\u4e3aunix\u65f6\u95f4\u6233 \u5728\u5f80influxdb\u5199\u5165\u6570\u636e\u65f6\uff0cline-protocol\u8981\u6c42\u7684\u65f6\u95f4\u6233\u4e3aunix timestamp\uff0c\u5e76\u4e14\u4e0e\u5176\u7cbe\u5ea6\u5bf9\u5e94\u3002 influxdb\u59cb\u7ec8\u4f7f\u7528UTC\u65f6\u95f4\uff0c\u56e0\u6b64\uff0c tm \u4e5f\u5fc5\u987b\u5df2\u7ecf\u8f6c\u6362\u6210UTC\u65f6\u95f4\u3002 Parameters: Name Type Description Default tm Union[datetime.date, datetime.datetime] \u65f6\u95f4 required precision str \u65f6\u95f4\u7cbe\u5ea6\uff0c\u9ed8\u8ba4\u4e3a\u79d2\u3002 's' Returns: Type Description int \u65f6\u95f4\u6233 Source code in omicron/dal/influx/flux.py @classmethod def to_timestamp ( cls , tm : Frame , precision : str = \"s\" ) -> int : \"\"\"\u5c06\u65f6\u95f4\u6839\u636e\u7cbe\u5ea6\u8f6c\u6362\u4e3aunix\u65f6\u95f4\u6233 \u5728\u5f80influxdb\u5199\u5165\u6570\u636e\u65f6\uff0cline-protocol\u8981\u6c42\u7684\u65f6\u95f4\u6233\u4e3aunix timestamp\uff0c\u5e76\u4e14\u4e0e\u5176\u7cbe\u5ea6\u5bf9\u5e94\u3002 influxdb\u59cb\u7ec8\u4f7f\u7528UTC\u65f6\u95f4\uff0c\u56e0\u6b64\uff0c`tm`\u4e5f\u5fc5\u987b\u5df2\u7ecf\u8f6c\u6362\u6210UTC\u65f6\u95f4\u3002 Args: tm: \u65f6\u95f4 precision: \u65f6\u95f4\u7cbe\u5ea6\uff0c\u9ed8\u8ba4\u4e3a\u79d2\u3002 Returns: \u65f6\u95f4\u6233 \"\"\" if precision not in [ \"s\" , \"ms\" , \"us\" ]: raise AssertionError ( \"precision must be 's', 'ms' or 'us'\" ) # get int repr of tm, in seconds unit if isinstance ( tm , np . datetime64 ): tm = tm . astype ( \"datetime64[s]\" ) . astype ( \"int\" ) elif isinstance ( tm , datetime . datetime ): tm = tm . timestamp () else : tm = arrow . get ( tm ) . timestamp () return int ( tm * 10 ** ({ \"s\" : 0 , \"ms\" : 3 , \"us\" : 6 }[ precision ]))","title":"to_timestamp()"},{"location":"api/dal/influxclient/","text":"InfluxClient - the performanct async client for influxdb \u00b6 Source code in omicron/dal/influx/influxclient.py class InfluxClient : def __init__ ( self , url : str , token : str , bucket : str , org : str = None , enable_compress = False , chunk_size : int = 5000 , precision : str = \"s\" , ): \"\"\"[summary] Args: url ([type]): [description] token ([type]): [description] bucket ([type]): [description] org ([type], optional): [description]. Defaults to None. enable_compress ([type], optional): [description]. Defaults to False. chunk_size: number of lines to be saved in one request precision: \u652f\u6301\u7684\u65f6\u95f4\u7cbe\u5ea6 \"\"\" self . _url = url self . _bucket = bucket self . _enable_compress = enable_compress self . _org = org self . 
_org_id = None # \u9700\u8981\u65f6\u901a\u8fc7\u67e5\u8be2\u83b7\u53d6\uff0c\u6b64\u540e\u4e0d\u518d\u66f4\u65b0 self . _token = token # influxdb 2.0\u8d77\u652f\u6301\u7684\u65f6\u95f4\u7cbe\u5ea6\u6709\uff1ans, us, ms, s\u3002\u672c\u5ba2\u6237\u7aef\u53ea\u652f\u6301s, ms\u548cus self . _precision = precision . lower () if self . _precision not in [ \"s\" , \"ms\" , \"us\" ]: # pragma: no cover raise ValueError ( \"precision must be one of ['s', 'ms', 'us']\" ) self . _chunk_size = chunk_size # write self . _write_url = f \" { self . _url } /api/v2/write?org= { self . _org } &bucket= { self . _bucket } &precision= { self . _precision } \" self . _write_headers = { \"Content-Type\" : \"text/plain; charset=utf-8\" , \"Authorization\" : f \"Token { token } \" , \"Accept\" : \"application/json\" , } if self . _enable_compress : self . _write_headers [ \"Content-Encoding\" ] = \"gzip\" self . _query_url = f \" { self . _url } /api/v2/query?org= { self . _org } \" self . _query_headers = { \"Authorization\" : f \"Token { token } \" , \"Content-Type\" : \"application/vnd.flux\" , # influx\u67e5\u8be2\u7ed3\u679c\u683c\u5f0f\uff0c\u65e0\u8bba\u5982\u4f55\u6307\u5b9a\uff08\u6216\u8005\u4e0d\u6307\u5b9a\uff09\uff0c\u57282.1\u4e2d\u59cb\u7ec8\u662fcsv\u683c\u5f0f \"Accept\" : \"text/csv\" , } if self . _enable_compress : self . _query_headers [ \"Accept-Encoding\" ] = \"gzip\" self . _delete_url = ( f \" { self . _url } /api/v2/delete?org= { self . _org } &bucket= { self . _bucket } \" ) self . _delete_headers = { \"Authorization\" : f \"Token { token } \" , \"Content-Type\" : \"application/json\" , } async def save ( self , data : Union [ np . ndarray , DataFrame ], measurement : str = None , tag_keys : List [ str ] = [], time_key : str = None , global_tags : Dict = {}, chunk_size : int = None , ) -> None : \"\"\"save `data` into influxdb if `data` is a pandas.DataFrame or numy structured array, it will be converted to line protocol and saved. If `data` is str, use `write` method instead. Args: data: data to be saved measurement: the name of measurement tag_keys: which columns name will be used as tags chunk_size: number of lines to be saved in one request. if it's -1, then all data will be written in one request. If it's None, then it will be set to `self._chunk_size` Raises: InfluxDBWriteError: if write failed \"\"\" # todo: add more errors raise if isinstance ( data , DataFrame ): assert ( measurement is not None ), \"measurement must be specified when data is a DataFrame\" if tag_keys : assert set ( tag_keys ) in set ( data . columns . tolist () ), \"tag_keys must be in data.columns\" serializer = DataframeSerializer ( data , measurement , time_key , tag_keys , global_tags , precision = self . _precision , ) if chunk_size == - 1 : chunk_size = len ( data ) for lines in serializer . serialize ( chunk_size or self . _chunk_size ): await self . write ( lines ) elif isinstance ( data , np . ndarray ): assert ( measurement is not None ), \"measurement must be specified when data is a numpy array\" assert ( time_key is not None ), \"time_key must be specified when data is a numpy array\" serializer = NumpySerializer ( data , measurement , time_key , tag_keys , global_tags , time_precision = self . _precision , ) if chunk_size == - 1 : chunk_size = len ( data ) for lines in serializer . serialize ( chunk_size or self . _chunk_size ): await self . 
write ( lines ) else : raise TypeError ( f \"data must be pandas.DataFrame, numpy array, got { type ( data ) } \" ) async def write ( self , line_protocol : str ): \"\"\"\u5c06line-protocol\u6570\u7ec4\u5199\u5165influxdb Args: line_protocol: \u5f85\u5199\u5165\u7684\u6570\u636e\uff0c\u4ee5line-protocol\u6570\u7ec4\u5f62\u5f0f\u5b58\u5728 \"\"\" # todo: add raise error declaration if self . _enable_compress : line_protocol_ = gzip . compress ( line_protocol . encode ( \"utf-8\" )) else : line_protocol_ = line_protocol async with ClientSession () as session : async with session . post ( self . _write_url , data = line_protocol_ , headers = self . _write_headers ) as resp : if resp . status != 204 : err = await resp . json () logger . warning ( \"influxdb write error when processing: %s , err code: %s , message: %s \" , { line_protocol [: 100 ]}, err [ \"code\" ], err [ \"message\" ], ) logger . debug ( \"data caused error: %s \" , line_protocol ) raise InfluxDBWriteError ( f \"influxdb write failed, err: { err [ 'message' ] } \" ) async def query ( self , flux : Union [ Flux , str ], deserializer : Callable = None ) -> Any : \"\"\"flux\u67e5\u8be2 flux\u67e5\u8be2\u7ed3\u679c\u662f\u4e00\u4e2a\u4ee5annotated csv\u683c\u5f0f\u5b58\u50a8\u7684\u6570\u636e\uff0c\u4f8b\u5982\uff1a ``` ,result,table,_time,code,amount,close,factor,high,low,open,volume ,_result,0,2019-01-01T00:00:00Z,000001.XSHE,100000000,5.15,1.23,5.2,5,5.1,1000000 ``` \u4e0a\u8ff0`result`\u4e2d\uff0c\u4e8b\u5148\u901a\u8fc7Flux.keep()\u9650\u5236\u4e86\u8fd4\u56de\u7684\u5b57\u6bb5\u4e3a_time,code,amount,close,factor,high,low,open,volume\u3002influxdb\u67e5\u8be2\u8fd4\u56de\u7ed3\u679c\u65f6\uff0c\u603b\u662f\u6309\u7167\u5b57\u6bb5\u540d\u79f0\u5347\u5e8f\u6392\u5217\u3002\u6b64\u5916\uff0c\u603b\u662f\u4f1a\u989d\u5916\u5730\u8fd4\u56de_result, table\u4e24\u4e2a\u5b57\u6bb5\u3002 \u5982\u679c\u4f20\u5165\u4e86deserializer\uff0c\u5219\u4f1a\u8c03\u7528deserializer\u5c06\u5176\u89e3\u6790\u6210\u4e3apython\u5bf9\u8c61\u3002\u5426\u5219\uff0c\u8fd4\u56debytes\u6570\u636e\u3002 Args: flux: flux\u67e5\u8be2\u8bed\u53e5 deserializer: \u53cd\u5e8f\u5217\u5316\u51fd\u6570 Returns: \u5982\u679c\u672a\u63d0\u4f9b\u53cd\u5e8f\u5217\u5316\u51fd\u6570\uff0c\u5219\u8fd4\u56de\u7ed3\u679c\u4e3abytes array(\u5982\u679c\u6307\u5b9a\u4e86compress=True\uff0c\u8fd4\u56de\u7ed3\u679c\u4e3agzip\u89e3\u538b\u7f29\u540e\u7684bytes array)\uff0c\u5426\u5219\u8fd4\u56de\u53cd\u5e8f\u5217\u5316\u540e\u7684python\u5bf9\u8c61 \"\"\" if isinstance ( flux , Flux ): flux = str ( flux ) async with ClientSession () as session : async with session . post ( self . _query_url , data = flux , headers = self . _query_headers ) as resp : if resp . status != 200 : err = await resp . json () logger . warning ( f \"influxdb query error: { err } when processing { flux [: 500 ] } \" ) logger . debug ( \"data caused error: %s \" , flux ) raise InfluxDBQueryError ( f \"influxdb query failed, status code: { err [ 'message' ] } \" ) else : # auto-unzip body = await resp . read () if deserializer : try : return deserializer ( body ) except Exception as e : logger . exception ( e ) logger . 
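A sketch of constructing the client and writing a raw line-protocol string. The URL, token, bucket, org, measurement and tag values are placeholders, and the import path is assumed from the source location omicron/dal/influx/influxclient.py shown above.

```python
import asyncio

from omicron.dal.influx.influxclient import InfluxClient  # assumed import path

async def main():
    client = InfluxClient(
        url="http://localhost:8086",  # placeholder connection settings
        token="my-token",
        bucket="zillionare",
        org="my-org",
        enable_compress=True,         # gzip request bodies and accept gzip responses
        precision="s",
    )
    # write() takes data already in line-protocol form; save() does the conversion for you
    await client.write("stock_bars_1d,code=000001.XSHE open=5.1,close=5.15 1640995200")

asyncio.run(main())
```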
warning ( \"failed to deserialize data: %s , the query is: %s \" , body , flux [: 500 ], ) raise else : return body async def drop_measurement ( self , measurement : str ): \"\"\"\u4eceinfluxdb\u4e2d\u5220\u9664\u4e00\u4e2ameasurement \u8c03\u7528\u6b64\u65b9\u6cd5\u540e\uff0c\u5b9e\u9645\u4e0a\u8be5measurement\u4ecd\u7136\u5b58\u5728\uff0c\u53ea\u662f\u6ca1\u6709\u6570\u636e\u3002 \"\"\" # todo: add raise error declaration await self . delete ( measurement , arrow . now () . naive ) async def delete ( self , measurement : str , stop : datetime . datetime , tags : Optional [ Dict [ str , str ]] = {}, start : datetime . datetime = None , precision : str = \"s\" , ): \"\"\"\u5220\u9664influxdb\u4e2d\u6307\u5b9a\u65f6\u95f4\u6bb5\u5185\u7684\u6570\u636e \u5173\u4e8e\u53c2\u6570\uff0c\u8bf7\u53c2\u89c1[Flux.delete][omicron.dal.influx.flux.Flux.delete]\u3002 Args: measurement: \u6307\u5b9ameasurement\u540d\u5b57 stop: \u5f85\u5220\u9664\u8bb0\u5f55\u7684\u7ed3\u675f\u65f6\u95f4 start: \u5f85\u5220\u9664\u8bb0\u5f55\u7684\u5f00\u59cb\u65f6\u95f4\uff0c\u5982\u679c\u672a\u6307\u5b9a\uff0c\u5219\u4f7f\u7528EPOCH_START tags: \u6309tag\u8fdb\u884c\u8fc7\u6ee4\u7684\u6761\u4ef6 precision: \u7528\u4ee5\u683c\u5f0f\u5316\u8d77\u59cb\u548c\u7ed3\u675f\u65f6\u95f4\u3002 Raises: InfluxDeleteError: \u5982\u679c\u5220\u9664\u5931\u8d25\uff0c\u5219\u629b\u51fa\u6b64\u5f02\u5e38 \"\"\" # todo: add raise error declaration command = Flux () . delete ( measurement , stop , tags , start = start , precision = precision ) async with ClientSession () as session : async with session . post ( self . _delete_url , data = json . dumps ( command ), headers = self . _delete_headers ) as resp : if resp . status != 204 : err = await resp . json () logger . warning ( \"influxdb delete error: %s when processin command %s \" , err [ \"message\" ], command , ) raise InfluxDeleteError ( f \"influxdb delete failed, status code: { err [ 'message' ] } \" ) async def list_buckets ( self ) -> List [ Dict ]: \"\"\"\u5217\u51fainfluxdb\u4e2d\u5bf9\u5e94token\u80fd\u770b\u5230\u7684\u6240\u6709\u7684bucket Returns: list of buckets, each bucket is a dict with keys: ``` id orgID, a 16 bytes hex string type, system or user description name retentionRules createdAt updatedAt links labels ``` \"\"\" url = f \" { self . _url } /api/v2/buckets\" headers = { \"Authorization\" : f \"Token { self . _token } \" } async with ClientSession () as session : async with session . get ( url , headers = headers ) as resp : if resp . status != 200 : err = await resp . json () raise InfluxSchemaError ( f \"influxdb list bucket failed, status code: { err [ 'message' ] } \" ) else : return ( await resp . json ())[ \"buckets\" ] async def delete_bucket ( self , bucket_id : str = None ): \"\"\"\u5220\u9664influxdb\u4e2d\u6307\u5b9abucket Args: bucket_id: \u6307\u5b9abucket\u7684id\u3002\u5982\u679c\u4e3aNone\uff0c\u5219\u4f1a\u5220\u9664\u672cclient\u5bf9\u5e94\u7684bucket\u3002 \"\"\" if bucket_id is None : buckets = await self . list_buckets () for bucket in buckets : if bucket [ \"type\" ] == \"user\" and bucket [ \"name\" ] == self . _bucket : bucket_id = bucket [ \"id\" ] break else : raise BadParameterError ( \"bucket_id is None, and we can't find bucket with name: %s \" % self . _bucket ) url = f \" { self . _url } /api/v2/buckets/ { bucket_id } \" headers = { \"Authorization\" : f \"Token { self . _token } \" } async with ClientSession () as session : async with session . delete ( url , headers = headers ) as resp : if resp . 
status != 204 : err = await resp . json () logger . warning ( \"influxdb delete bucket error: %s when processin command %s \" , err [ \"message\" ], bucket_id , ) raise InfluxSchemaError ( f \"influxdb delete bucket failed, status code: { err [ 'message' ] } \" ) async def create_bucket ( self , description = None , retention_rules : List [ Dict ] = None , org_id : str = None ) -> str : \"\"\"\u521b\u5efainfluxdb\u4e2d\u6307\u5b9abucket Args: description: \u6307\u5b9abucket\u7684\u63cf\u8ff0 org_id: \u6307\u5b9abucket\u6240\u5c5e\u7684\u7ec4\u7ec7id\uff0c\u5982\u679c\u672a\u6307\u5b9a\uff0c\u5219\u4f7f\u7528\u672cclient\u5bf9\u5e94\u7684\u7ec4\u7ec7id\u3002 Raises: InfluxSchemaError: \u5f53influxdb\u8fd4\u56de\u9519\u8bef\u65f6\uff0c\u6bd4\u5982\u91cd\u590d\u521b\u5efabucket\u7b49\uff0c\u4f1a\u629b\u51fa\u6b64\u5f02\u5e38 Returns: \u65b0\u521b\u5efa\u7684bucket\u7684id \"\"\" if org_id is None : org_id = await self . query_org_id () url = f \" { self . _url } /api/v2/buckets\" headers = { \"Authorization\" : f \"Token { self . _token } \" } data = { \"name\" : self . _bucket , \"orgID\" : org_id , \"description\" : description , \"retentionRules\" : retention_rules , } async with ClientSession () as session : async with session . post ( url , data = json . dumps ( data ), headers = headers ) as resp : if resp . status != 201 : err = await resp . json () logger . warning ( \"influxdb create bucket error: %s when processin command %s \" , err [ \"message\" ], data , ) raise InfluxSchemaError ( f \"influxdb create bucket failed, status code: { err [ 'message' ] } \" ) else : result = await resp . json () return result [ \"id\" ] async def list_organizations ( self , offset : int = 0 , limit : int = 100 ) -> List [ Dict ]: \"\"\"\u5217\u51fa\u672c\u5ba2\u6237\u7aef\u5141\u8bb8\u67e5\u8be2\u7684\u6240\u7ec4\u7ec7 Args: offset : \u5206\u9875\u8d77\u70b9 limit : \u6bcf\u9875size Raises: InfluxSchemaError: influxdb\u8fd4\u56de\u7684\u9519\u8bef Returns: list of organizations, each organization is a dict with keys: ``` id : the id of the org links name : the name of the org description createdAt updatedAt ``` \"\"\" url = f \" { self . _url } /api/v2/orgs?offset= { offset } &limit= { limit } \" headers = { \"Authorization\" : f \"Token { self . _token } \" } async with ClientSession () as session : async with session . get ( url , headers = headers ) as resp : if resp . status != 200 : err = await resp . json () logger . warning ( \"influxdb query orgs err: %s \" , err [ \"message\" ]) raise InfluxSchemaError ( f \"influxdb query orgs failed, status code: { err [ 'message' ] } \" ) else : return ( await resp . json ())[ \"orgs\" ] async def query_org_id ( self , name : str = None ) -> str : \"\"\"\u901a\u8fc7\u7ec4\u7ec7\u540d\u67e5\u627e\u7ec4\u7ec7id \u53ea\u80fd\u67e5\u7684\u672c\u5ba2\u6237\u7aef\u5141\u8bb8\u67e5\u8be2\u7684\u7ec4\u7ec7\u3002\u5982\u679cname\u672a\u63d0\u4f9b\uff0c\u5219\u4f7f\u7528\u672c\u5ba2\u6237\u7aef\u521b\u5efa\u65f6\u4f20\u5165\u7684\u7ec4\u7ec7\u540d\u3002 Args: name: \u6307\u5b9a\u7ec4\u7ec7\u540d Returns: \u7ec4\u7ec7id \"\"\" if name is None : name = self . _org orgs = await self . 
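A sketch of removing data with delete() and drop_measurement(), run inside a coroutine with a `client` constructed as in the earlier sketch; the measurement and tag values are placeholders.

```python
import datetime

# delete everything for one code up to a given stop time
await client.delete(
    "stock_bars_1d",
    stop=datetime.datetime(2022, 1, 31),
    tags={"code": "000001.XSHE"},
)

# or clear a whole measurement (the measurement itself still exists afterwards, just empty)
await client.drop_measurement("stock_bars_1d")
```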
list_organizations () for org in orgs : if org [ \"name\" ] == name : return org [ \"id\" ] raise BadParameterError ( f \"can't find org with name: { name } \" ) __init__ ( self , url , token , bucket , org = None , enable_compress = False , chunk_size = 5000 , precision = 's' ) special \u00b6 [summary] Parameters: Name Type Description Default url [type] [description] required token [type] [description] required bucket [type] [description] required org [type] [description]. Defaults to None. None enable_compress [type] [description]. Defaults to False. False chunk_size int number of lines to be saved in one request 5000 precision str \u652f\u6301\u7684\u65f6\u95f4\u7cbe\u5ea6 's' Source code in omicron/dal/influx/influxclient.py def __init__ ( self , url : str , token : str , bucket : str , org : str = None , enable_compress = False , chunk_size : int = 5000 , precision : str = \"s\" , ): \"\"\"[summary] Args: url ([type]): [description] token ([type]): [description] bucket ([type]): [description] org ([type], optional): [description]. Defaults to None. enable_compress ([type], optional): [description]. Defaults to False. chunk_size: number of lines to be saved in one request precision: \u652f\u6301\u7684\u65f6\u95f4\u7cbe\u5ea6 \"\"\" self . _url = url self . _bucket = bucket self . _enable_compress = enable_compress self . _org = org self . _org_id = None # \u9700\u8981\u65f6\u901a\u8fc7\u67e5\u8be2\u83b7\u53d6\uff0c\u6b64\u540e\u4e0d\u518d\u66f4\u65b0 self . _token = token # influxdb 2.0\u8d77\u652f\u6301\u7684\u65f6\u95f4\u7cbe\u5ea6\u6709\uff1ans, us, ms, s\u3002\u672c\u5ba2\u6237\u7aef\u53ea\u652f\u6301s, ms\u548cus self . _precision = precision . lower () if self . _precision not in [ \"s\" , \"ms\" , \"us\" ]: # pragma: no cover raise ValueError ( \"precision must be one of ['s', 'ms', 'us']\" ) self . _chunk_size = chunk_size # write self . _write_url = f \" { self . _url } /api/v2/write?org= { self . _org } &bucket= { self . _bucket } &precision= { self . _precision } \" self . _write_headers = { \"Content-Type\" : \"text/plain; charset=utf-8\" , \"Authorization\" : f \"Token { token } \" , \"Accept\" : \"application/json\" , } if self . _enable_compress : self . _write_headers [ \"Content-Encoding\" ] = \"gzip\" self . _query_url = f \" { self . _url } /api/v2/query?org= { self . _org } \" self . _query_headers = { \"Authorization\" : f \"Token { token } \" , \"Content-Type\" : \"application/vnd.flux\" , # influx\u67e5\u8be2\u7ed3\u679c\u683c\u5f0f\uff0c\u65e0\u8bba\u5982\u4f55\u6307\u5b9a\uff08\u6216\u8005\u4e0d\u6307\u5b9a\uff09\uff0c\u57282.1\u4e2d\u59cb\u7ec8\u662fcsv\u683c\u5f0f \"Accept\" : \"text/csv\" , } if self . _enable_compress : self . _query_headers [ \"Accept-Encoding\" ] = \"gzip\" self . _delete_url = ( f \" { self . _url } /api/v2/delete?org= { self . _org } &bucket= { self . _bucket } \" ) self . 
_delete_headers = { \"Authorization\" : f \"Token { token } \" , \"Content-Type\" : \"application/json\" , } create_bucket ( self , description = None , retention_rules = None , org_id = None ) async \u00b6 \u521b\u5efainfluxdb\u4e2d\u6307\u5b9abucket Parameters: Name Type Description Default description \u6307\u5b9abucket\u7684\u63cf\u8ff0 None org_id str \u6307\u5b9abucket\u6240\u5c5e\u7684\u7ec4\u7ec7id\uff0c\u5982\u679c\u672a\u6307\u5b9a\uff0c\u5219\u4f7f\u7528\u672cclient\u5bf9\u5e94\u7684\u7ec4\u7ec7id\u3002 None Exceptions: Type Description InfluxSchemaError \u5f53influxdb\u8fd4\u56de\u9519\u8bef\u65f6\uff0c\u6bd4\u5982\u91cd\u590d\u521b\u5efabucket\u7b49\uff0c\u4f1a\u629b\u51fa\u6b64\u5f02\u5e38 Returns: Type Description str \u65b0\u521b\u5efa\u7684bucket\u7684id Source code in omicron/dal/influx/influxclient.py async def create_bucket ( self , description = None , retention_rules : List [ Dict ] = None , org_id : str = None ) -> str : \"\"\"\u521b\u5efainfluxdb\u4e2d\u6307\u5b9abucket Args: description: \u6307\u5b9abucket\u7684\u63cf\u8ff0 org_id: \u6307\u5b9abucket\u6240\u5c5e\u7684\u7ec4\u7ec7id\uff0c\u5982\u679c\u672a\u6307\u5b9a\uff0c\u5219\u4f7f\u7528\u672cclient\u5bf9\u5e94\u7684\u7ec4\u7ec7id\u3002 Raises: InfluxSchemaError: \u5f53influxdb\u8fd4\u56de\u9519\u8bef\u65f6\uff0c\u6bd4\u5982\u91cd\u590d\u521b\u5efabucket\u7b49\uff0c\u4f1a\u629b\u51fa\u6b64\u5f02\u5e38 Returns: \u65b0\u521b\u5efa\u7684bucket\u7684id \"\"\" if org_id is None : org_id = await self . query_org_id () url = f \" { self . _url } /api/v2/buckets\" headers = { \"Authorization\" : f \"Token { self . _token } \" } data = { \"name\" : self . _bucket , \"orgID\" : org_id , \"description\" : description , \"retentionRules\" : retention_rules , } async with ClientSession () as session : async with session . post ( url , data = json . dumps ( data ), headers = headers ) as resp : if resp . status != 201 : err = await resp . json () logger . warning ( \"influxdb create bucket error: %s when processin command %s \" , err [ \"message\" ], data , ) raise InfluxSchemaError ( f \"influxdb create bucket failed, status code: { err [ 'message' ] } \" ) else : result = await resp . json () return result [ \"id\" ] delete ( self , measurement , stop , tags = {}, start = None , precision = 's' ) async \u00b6 \u5220\u9664influxdb\u4e2d\u6307\u5b9a\u65f6\u95f4\u6bb5\u5185\u7684\u6570\u636e \u5173\u4e8e\u53c2\u6570\uff0c\u8bf7\u53c2\u89c1 Flux.delete \u3002 Parameters: Name Type Description Default measurement str \u6307\u5b9ameasurement\u540d\u5b57 required stop datetime \u5f85\u5220\u9664\u8bb0\u5f55\u7684\u7ed3\u675f\u65f6\u95f4 required start datetime \u5f85\u5220\u9664\u8bb0\u5f55\u7684\u5f00\u59cb\u65f6\u95f4\uff0c\u5982\u679c\u672a\u6307\u5b9a\uff0c\u5219\u4f7f\u7528EPOCH_START None tags Optional[Dict[str, str]] \u6309tag\u8fdb\u884c\u8fc7\u6ee4\u7684\u6761\u4ef6 {} precision str \u7528\u4ee5\u683c\u5f0f\u5316\u8d77\u59cb\u548c\u7ed3\u675f\u65f6\u95f4\u3002 's' Exceptions: Type Description InfluxDeleteError \u5982\u679c\u5220\u9664\u5931\u8d25\uff0c\u5219\u629b\u51fa\u6b64\u5f02\u5e38 Source code in omicron/dal/influx/influxclient.py async def delete ( self , measurement : str , stop : datetime . datetime , tags : Optional [ Dict [ str , str ]] = {}, start : datetime . 
datetime = None , precision : str = \"s\" , ): \"\"\"\u5220\u9664influxdb\u4e2d\u6307\u5b9a\u65f6\u95f4\u6bb5\u5185\u7684\u6570\u636e \u5173\u4e8e\u53c2\u6570\uff0c\u8bf7\u53c2\u89c1[Flux.delete][omicron.dal.influx.flux.Flux.delete]\u3002 Args: measurement: \u6307\u5b9ameasurement\u540d\u5b57 stop: \u5f85\u5220\u9664\u8bb0\u5f55\u7684\u7ed3\u675f\u65f6\u95f4 start: \u5f85\u5220\u9664\u8bb0\u5f55\u7684\u5f00\u59cb\u65f6\u95f4\uff0c\u5982\u679c\u672a\u6307\u5b9a\uff0c\u5219\u4f7f\u7528EPOCH_START tags: \u6309tag\u8fdb\u884c\u8fc7\u6ee4\u7684\u6761\u4ef6 precision: \u7528\u4ee5\u683c\u5f0f\u5316\u8d77\u59cb\u548c\u7ed3\u675f\u65f6\u95f4\u3002 Raises: InfluxDeleteError: \u5982\u679c\u5220\u9664\u5931\u8d25\uff0c\u5219\u629b\u51fa\u6b64\u5f02\u5e38 \"\"\" # todo: add raise error declaration command = Flux () . delete ( measurement , stop , tags , start = start , precision = precision ) async with ClientSession () as session : async with session . post ( self . _delete_url , data = json . dumps ( command ), headers = self . _delete_headers ) as resp : if resp . status != 204 : err = await resp . json () logger . warning ( \"influxdb delete error: %s when processin command %s \" , err [ \"message\" ], command , ) raise InfluxDeleteError ( f \"influxdb delete failed, status code: { err [ 'message' ] } \" ) delete_bucket ( self , bucket_id = None ) async \u00b6 \u5220\u9664influxdb\u4e2d\u6307\u5b9abucket Parameters: Name Type Description Default bucket_id str \u6307\u5b9abucket\u7684id\u3002\u5982\u679c\u4e3aNone\uff0c\u5219\u4f1a\u5220\u9664\u672cclient\u5bf9\u5e94\u7684bucket\u3002 None Source code in omicron/dal/influx/influxclient.py async def delete_bucket ( self , bucket_id : str = None ): \"\"\"\u5220\u9664influxdb\u4e2d\u6307\u5b9abucket Args: bucket_id: \u6307\u5b9abucket\u7684id\u3002\u5982\u679c\u4e3aNone\uff0c\u5219\u4f1a\u5220\u9664\u672cclient\u5bf9\u5e94\u7684bucket\u3002 \"\"\" if bucket_id is None : buckets = await self . list_buckets () for bucket in buckets : if bucket [ \"type\" ] == \"user\" and bucket [ \"name\" ] == self . _bucket : bucket_id = bucket [ \"id\" ] break else : raise BadParameterError ( \"bucket_id is None, and we can't find bucket with name: %s \" % self . _bucket ) url = f \" { self . _url } /api/v2/buckets/ { bucket_id } \" headers = { \"Authorization\" : f \"Token { self . _token } \" } async with ClientSession () as session : async with session . delete ( url , headers = headers ) as resp : if resp . status != 204 : err = await resp . json () logger . warning ( \"influxdb delete bucket error: %s when processin command %s \" , err [ \"message\" ], bucket_id , ) raise InfluxSchemaError ( f \"influxdb delete bucket failed, status code: { err [ 'message' ] } \" ) drop_measurement ( self , measurement ) async \u00b6 \u4eceinfluxdb\u4e2d\u5220\u9664\u4e00\u4e2ameasurement \u8c03\u7528\u6b64\u65b9\u6cd5\u540e\uff0c\u5b9e\u9645\u4e0a\u8be5measurement\u4ecd\u7136\u5b58\u5728\uff0c\u53ea\u662f\u6ca1\u6709\u6570\u636e\u3002 Source code in omicron/dal/influx/influxclient.py async def drop_measurement ( self , measurement : str ): \"\"\"\u4eceinfluxdb\u4e2d\u5220\u9664\u4e00\u4e2ameasurement \u8c03\u7528\u6b64\u65b9\u6cd5\u540e\uff0c\u5b9e\u9645\u4e0a\u8be5measurement\u4ecd\u7136\u5b58\u5728\uff0c\u53ea\u662f\u6ca1\u6709\u6570\u636e\u3002 \"\"\" # todo: add raise error declaration await self . delete ( measurement , arrow . now () . 
naive ) list_buckets ( self ) async \u00b6 \u5217\u51fainfluxdb\u4e2d\u5bf9\u5e94token\u80fd\u770b\u5230\u7684\u6240\u6709\u7684bucket Returns: Type Description list of buckets, each bucket is a dict with keys ``` id orgID, a 16 bytes hex string type, system or user description name retentionRules createdAt updatedAt links labels ``` Source code in omicron/dal/influx/influxclient.py async def list_buckets ( self ) -> List [ Dict ]: \"\"\"\u5217\u51fainfluxdb\u4e2d\u5bf9\u5e94token\u80fd\u770b\u5230\u7684\u6240\u6709\u7684bucket Returns: list of buckets, each bucket is a dict with keys: ``` id orgID, a 16 bytes hex string type, system or user description name retentionRules createdAt updatedAt links labels ``` \"\"\" url = f \" { self . _url } /api/v2/buckets\" headers = { \"Authorization\" : f \"Token { self . _token } \" } async with ClientSession () as session : async with session . get ( url , headers = headers ) as resp : if resp . status != 200 : err = await resp . json () raise InfluxSchemaError ( f \"influxdb list bucket failed, status code: { err [ 'message' ] } \" ) else : return ( await resp . json ())[ \"buckets\" ] list_organizations ( self , offset = 0 , limit = 100 ) async \u00b6 \u5217\u51fa\u672c\u5ba2\u6237\u7aef\u5141\u8bb8\u67e5\u8be2\u7684\u6240\u7ec4\u7ec7 Parameters: Name Type Description Default offset \u5206\u9875\u8d77\u70b9 0 limit \u6bcf\u9875size 100 Exceptions: Type Description InfluxSchemaError influxdb\u8fd4\u56de\u7684\u9519\u8bef Returns: Type Description list of organizations, each organization is a dict with keys 1 2 3 4 5 6 id : the id of the org links name : the name of the org description createdAt updatedAt Source code in omicron/dal/influx/influxclient.py async def list_organizations ( self , offset : int = 0 , limit : int = 100 ) -> List [ Dict ]: \"\"\"\u5217\u51fa\u672c\u5ba2\u6237\u7aef\u5141\u8bb8\u67e5\u8be2\u7684\u6240\u7ec4\u7ec7 Args: offset : \u5206\u9875\u8d77\u70b9 limit : \u6bcf\u9875size Raises: InfluxSchemaError: influxdb\u8fd4\u56de\u7684\u9519\u8bef Returns: list of organizations, each organization is a dict with keys: ``` id : the id of the org links name : the name of the org description createdAt updatedAt ``` \"\"\" url = f \" { self . _url } /api/v2/orgs?offset= { offset } &limit= { limit } \" headers = { \"Authorization\" : f \"Token { self . _token } \" } async with ClientSession () as session : async with session . get ( url , headers = headers ) as resp : if resp . status != 200 : err = await resp . json () logger . warning ( \"influxdb query orgs err: %s \" , err [ \"message\" ]) raise InfluxSchemaError ( f \"influxdb query orgs failed, status code: { err [ 'message' ] } \" ) else : return ( await resp . 
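The schema helpers pair up naturally: query_org_id() feeds create_bucket(), and list_buckets() can verify the result. A sketch, again inside a coroutine with the `client` from the earlier sketch; the retention-rule shape is an assumption based on the InfluxDB v2 API and is not shown in the source above.

```python
# look up the org id for the org name the client was created with
org_id = await client.query_org_id()

# create the client's bucket with a 30-day retention rule (rule shape assumed)
bucket_id = await client.create_bucket(
    description="bars data",
    retention_rules=[{"type": "expire", "everySeconds": 30 * 24 * 3600}],
    org_id=org_id,
)

for bucket in await client.list_buckets():
    print(bucket["name"], bucket["id"], bucket["type"])
```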
json ())[ \"orgs\" ] query ( self , flux , deserializer = None ) async \u00b6 flux\u67e5\u8be2 flux\u67e5\u8be2\u7ed3\u679c\u662f\u4e00\u4e2a\u4ee5annotated csv\u683c\u5f0f\u5b58\u50a8\u7684\u6570\u636e\uff0c\u4f8b\u5982\uff1a 1 2 ,result,table,_time,code,amount,close,factor,high,low,open,volume ,_result,0,2019-01-01T00:00:00Z,000001.XSHE,100000000,5.15,1.23,5.2,5,5.1,1000000 \u4e0a\u8ff0 result \u4e2d\uff0c\u4e8b\u5148\u901a\u8fc7Flux.keep()\u9650\u5236\u4e86\u8fd4\u56de\u7684\u5b57\u6bb5\u4e3a_time,code,amount,close,factor,high,low,open,volume\u3002influxdb\u67e5\u8be2\u8fd4\u56de\u7ed3\u679c\u65f6\uff0c\u603b\u662f\u6309\u7167\u5b57\u6bb5\u540d\u79f0\u5347\u5e8f\u6392\u5217\u3002\u6b64\u5916\uff0c\u603b\u662f\u4f1a\u989d\u5916\u5730\u8fd4\u56de_result, table\u4e24\u4e2a\u5b57\u6bb5\u3002 \u5982\u679c\u4f20\u5165\u4e86deserializer\uff0c\u5219\u4f1a\u8c03\u7528deserializer\u5c06\u5176\u89e3\u6790\u6210\u4e3apython\u5bf9\u8c61\u3002\u5426\u5219\uff0c\u8fd4\u56debytes\u6570\u636e\u3002 Parameters: Name Type Description Default flux Union[omicron.dal.influx.flux.Flux, str] flux\u67e5\u8be2\u8bed\u53e5 required deserializer Callable \u53cd\u5e8f\u5217\u5316\u51fd\u6570 None Returns: Type Description Any \u5982\u679c\u672a\u63d0\u4f9b\u53cd\u5e8f\u5217\u5316\u51fd\u6570\uff0c\u5219\u8fd4\u56de\u7ed3\u679c\u4e3abytes array(\u5982\u679c\u6307\u5b9a\u4e86compress=True\uff0c\u8fd4\u56de\u7ed3\u679c\u4e3agzip\u89e3\u538b\u7f29\u540e\u7684bytes array)\uff0c\u5426\u5219\u8fd4\u56de\u53cd\u5e8f\u5217\u5316\u540e\u7684python\u5bf9\u8c61 Source code in omicron/dal/influx/influxclient.py async def query ( self , flux : Union [ Flux , str ], deserializer : Callable = None ) -> Any : \"\"\"flux\u67e5\u8be2 flux\u67e5\u8be2\u7ed3\u679c\u662f\u4e00\u4e2a\u4ee5annotated csv\u683c\u5f0f\u5b58\u50a8\u7684\u6570\u636e\uff0c\u4f8b\u5982\uff1a ``` ,result,table,_time,code,amount,close,factor,high,low,open,volume ,_result,0,2019-01-01T00:00:00Z,000001.XSHE,100000000,5.15,1.23,5.2,5,5.1,1000000 ``` \u4e0a\u8ff0`result`\u4e2d\uff0c\u4e8b\u5148\u901a\u8fc7Flux.keep()\u9650\u5236\u4e86\u8fd4\u56de\u7684\u5b57\u6bb5\u4e3a_time,code,amount,close,factor,high,low,open,volume\u3002influxdb\u67e5\u8be2\u8fd4\u56de\u7ed3\u679c\u65f6\uff0c\u603b\u662f\u6309\u7167\u5b57\u6bb5\u540d\u79f0\u5347\u5e8f\u6392\u5217\u3002\u6b64\u5916\uff0c\u603b\u662f\u4f1a\u989d\u5916\u5730\u8fd4\u56de_result, table\u4e24\u4e2a\u5b57\u6bb5\u3002 \u5982\u679c\u4f20\u5165\u4e86deserializer\uff0c\u5219\u4f1a\u8c03\u7528deserializer\u5c06\u5176\u89e3\u6790\u6210\u4e3apython\u5bf9\u8c61\u3002\u5426\u5219\uff0c\u8fd4\u56debytes\u6570\u636e\u3002 Args: flux: flux\u67e5\u8be2\u8bed\u53e5 deserializer: \u53cd\u5e8f\u5217\u5316\u51fd\u6570 Returns: \u5982\u679c\u672a\u63d0\u4f9b\u53cd\u5e8f\u5217\u5316\u51fd\u6570\uff0c\u5219\u8fd4\u56de\u7ed3\u679c\u4e3abytes array(\u5982\u679c\u6307\u5b9a\u4e86compress=True\uff0c\u8fd4\u56de\u7ed3\u679c\u4e3agzip\u89e3\u538b\u7f29\u540e\u7684bytes array)\uff0c\u5426\u5219\u8fd4\u56de\u53cd\u5e8f\u5217\u5316\u540e\u7684python\u5bf9\u8c61 \"\"\" if isinstance ( flux , Flux ): flux = str ( flux ) async with ClientSession () as session : async with session . post ( self . _query_url , data = flux , headers = self . _query_headers ) as resp : if resp . status != 200 : err = await resp . json () logger . warning ( f \"influxdb query error: { err } when processing { flux [: 500 ] } \" ) logger . 
debug ( \"data caused error: %s \" , flux ) raise InfluxDBQueryError ( f \"influxdb query failed, status code: { err [ 'message' ] } \" ) else : # auto-unzip body = await resp . read () if deserializer : try : return deserializer ( body ) except Exception as e : logger . exception ( e ) logger . warning ( \"failed to deserialize data: %s , the query is: %s \" , body , flux [: 500 ], ) raise else : return body query_org_id ( self , name = None ) async \u00b6 \u901a\u8fc7\u7ec4\u7ec7\u540d\u67e5\u627e\u7ec4\u7ec7id \u53ea\u80fd\u67e5\u7684\u672c\u5ba2\u6237\u7aef\u5141\u8bb8\u67e5\u8be2\u7684\u7ec4\u7ec7\u3002\u5982\u679cname\u672a\u63d0\u4f9b\uff0c\u5219\u4f7f\u7528\u672c\u5ba2\u6237\u7aef\u521b\u5efa\u65f6\u4f20\u5165\u7684\u7ec4\u7ec7\u540d\u3002 Parameters: Name Type Description Default name str \u6307\u5b9a\u7ec4\u7ec7\u540d None Returns: Type Description str \u7ec4\u7ec7id Source code in omicron/dal/influx/influxclient.py async def query_org_id ( self , name : str = None ) -> str : \"\"\"\u901a\u8fc7\u7ec4\u7ec7\u540d\u67e5\u627e\u7ec4\u7ec7id \u53ea\u80fd\u67e5\u7684\u672c\u5ba2\u6237\u7aef\u5141\u8bb8\u67e5\u8be2\u7684\u7ec4\u7ec7\u3002\u5982\u679cname\u672a\u63d0\u4f9b\uff0c\u5219\u4f7f\u7528\u672c\u5ba2\u6237\u7aef\u521b\u5efa\u65f6\u4f20\u5165\u7684\u7ec4\u7ec7\u540d\u3002 Args: name: \u6307\u5b9a\u7ec4\u7ec7\u540d Returns: \u7ec4\u7ec7id \"\"\" if name is None : name = self . _org orgs = await self . list_organizations () for org in orgs : if org [ \"name\" ] == name : return org [ \"id\" ] raise BadParameterError ( f \"can't find org with name: { name } \" ) save ( self , data , measurement = None , tag_keys = [], time_key = None , global_tags = {}, chunk_size = None ) async \u00b6 save data into influxdb if data is a pandas.DataFrame or numy structured array, it will be converted to line protocol and saved. If data is str, use write method instead. Parameters: Name Type Description Default data Union[numpy.ndarray, pandas.core.frame.DataFrame] data to be saved required measurement str the name of measurement None tag_keys List[str] which columns name will be used as tags [] chunk_size int number of lines to be saved in one request. if it's -1, then all data will be written in one request. If it's None, then it will be set to self._chunk_size None Exceptions: Type Description InfluxDBWriteError if write failed Source code in omicron/dal/influx/influxclient.py async def save ( self , data : Union [ np . ndarray , DataFrame ], measurement : str = None , tag_keys : List [ str ] = [], time_key : str = None , global_tags : Dict = {}, chunk_size : int = None , ) -> None : \"\"\"save `data` into influxdb if `data` is a pandas.DataFrame or numy structured array, it will be converted to line protocol and saved. If `data` is str, use `write` method instead. Args: data: data to be saved measurement: the name of measurement tag_keys: which columns name will be used as tags chunk_size: number of lines to be saved in one request. if it's -1, then all data will be written in one request. If it's None, then it will be set to `self._chunk_size` Raises: InfluxDBWriteError: if write failed \"\"\" # todo: add more errors raise if isinstance ( data , DataFrame ): assert ( measurement is not None ), \"measurement must be specified when data is a DataFrame\" if tag_keys : assert set ( tag_keys ) in set ( data . columns . tolist () ), \"tag_keys must be in data.columns\" serializer = DataframeSerializer ( data , measurement , time_key , tag_keys , global_tags , precision = self . 
_precision , ) if chunk_size == - 1 : chunk_size = len ( data ) for lines in serializer . serialize ( chunk_size or self . _chunk_size ): await self . write ( lines ) elif isinstance ( data , np . ndarray ): assert ( measurement is not None ), \"measurement must be specified when data is a numpy array\" assert ( time_key is not None ), \"time_key must be specified when data is a numpy array\" serializer = NumpySerializer ( data , measurement , time_key , tag_keys , global_tags , time_precision = self . _precision , ) if chunk_size == - 1 : chunk_size = len ( data ) for lines in serializer . serialize ( chunk_size or self . _chunk_size ): await self . write ( lines ) else : raise TypeError ( f \"data must be pandas.DataFrame, numpy array, got { type ( data ) } \" ) write ( self , line_protocol ) async \u00b6 \u5c06line-protocol\u6570\u7ec4\u5199\u5165influxdb Parameters: Name Type Description Default line_protocol str \u5f85\u5199\u5165\u7684\u6570\u636e\uff0c\u4ee5line-protocol\u6570\u7ec4\u5f62\u5f0f\u5b58\u5728 required Source code in omicron/dal/influx/influxclient.py async def write ( self , line_protocol : str ): \"\"\"\u5c06line-protocol\u6570\u7ec4\u5199\u5165influxdb Args: line_protocol: \u5f85\u5199\u5165\u7684\u6570\u636e\uff0c\u4ee5line-protocol\u6570\u7ec4\u5f62\u5f0f\u5b58\u5728 \"\"\" # todo: add raise error declaration if self . _enable_compress : line_protocol_ = gzip . compress ( line_protocol . encode ( \"utf-8\" )) else : line_protocol_ = line_protocol async with ClientSession () as session : async with session . post ( self . _write_url , data = line_protocol_ , headers = self . _write_headers ) as resp : if resp . status != 204 : err = await resp . json () logger . warning ( \"influxdb write error when processing: %s , err code: %s , message: %s \" , { line_protocol [: 100 ]}, err [ \"code\" ], err [ \"message\" ], ) logger . debug ( \"data caused error: %s \" , line_protocol ) raise InfluxDBWriteError ( f \"influxdb write failed, err: { err [ 'message' ] } \" )","title":"InfluxClient"},{"location":"api/dal/influxclient/#influxclient---the-performanct-async-client-for-influxdb","text":"Source code in omicron/dal/influx/influxclient.py class InfluxClient : def __init__ ( self , url : str , token : str , bucket : str , org : str = None , enable_compress = False , chunk_size : int = 5000 , precision : str = \"s\" , ): \"\"\"[summary] Args: url ([type]): [description] token ([type]): [description] bucket ([type]): [description] org ([type], optional): [description]. Defaults to None. enable_compress ([type], optional): [description]. Defaults to False. chunk_size: number of lines to be saved in one request precision: \u652f\u6301\u7684\u65f6\u95f4\u7cbe\u5ea6 \"\"\" self . _url = url self . _bucket = bucket self . _enable_compress = enable_compress self . _org = org self . _org_id = None # \u9700\u8981\u65f6\u901a\u8fc7\u67e5\u8be2\u83b7\u53d6\uff0c\u6b64\u540e\u4e0d\u518d\u66f4\u65b0 self . _token = token # influxdb 2.0\u8d77\u652f\u6301\u7684\u65f6\u95f4\u7cbe\u5ea6\u6709\uff1ans, us, ms, s\u3002\u672c\u5ba2\u6237\u7aef\u53ea\u652f\u6301s, ms\u548cus self . _precision = precision . lower () if self . _precision not in [ \"s\" , \"ms\" , \"us\" ]: # pragma: no cover raise ValueError ( \"precision must be one of ['s', 'ms', 'us']\" ) self . _chunk_size = chunk_size # write self . _write_url = f \" { self . _url } /api/v2/write?org= { self . _org } &bucket= { self . _bucket } &precision= { self . _precision } \" self . 
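A sketch of save() with a numpy structured array, following the signature above; the column names and measurement are hypothetical, and it assumes the serializer accepts a datetime64 time column. Run inside a coroutine with the `client` from the earlier sketch.

```python
import numpy as np

bars = np.array(
    [
        ("2022-01-04", "000001.XSHE", 5.10, 5.15),
        ("2022-01-05", "000001.XSHE", 5.12, 5.18),
    ],
    dtype=[("frame", "datetime64[s]"), ("code", "U11"), ("open", "f4"), ("close", "f4")],
)

# chunk_size=-1 writes everything in a single request
await client.save(
    bars,
    measurement="stock_bars_1d",  # hypothetical measurement
    tag_keys=["code"],
    time_key="frame",
    chunk_size=-1,
)
```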
_write_headers = { \"Content-Type\" : \"text/plain; charset=utf-8\" , \"Authorization\" : f \"Token { token } \" , \"Accept\" : \"application/json\" , } if self . _enable_compress : self . _write_headers [ \"Content-Encoding\" ] = \"gzip\" self . _query_url = f \" { self . _url } /api/v2/query?org= { self . _org } \" self . _query_headers = { \"Authorization\" : f \"Token { token } \" , \"Content-Type\" : \"application/vnd.flux\" , # influx\u67e5\u8be2\u7ed3\u679c\u683c\u5f0f\uff0c\u65e0\u8bba\u5982\u4f55\u6307\u5b9a\uff08\u6216\u8005\u4e0d\u6307\u5b9a\uff09\uff0c\u57282.1\u4e2d\u59cb\u7ec8\u662fcsv\u683c\u5f0f \"Accept\" : \"text/csv\" , } if self . _enable_compress : self . _query_headers [ \"Accept-Encoding\" ] = \"gzip\" self . _delete_url = ( f \" { self . _url } /api/v2/delete?org= { self . _org } &bucket= { self . _bucket } \" ) self . _delete_headers = { \"Authorization\" : f \"Token { token } \" , \"Content-Type\" : \"application/json\" , } async def save ( self , data : Union [ np . ndarray , DataFrame ], measurement : str = None , tag_keys : List [ str ] = [], time_key : str = None , global_tags : Dict = {}, chunk_size : int = None , ) -> None : \"\"\"save `data` into influxdb if `data` is a pandas.DataFrame or numy structured array, it will be converted to line protocol and saved. If `data` is str, use `write` method instead. Args: data: data to be saved measurement: the name of measurement tag_keys: which columns name will be used as tags chunk_size: number of lines to be saved in one request. if it's -1, then all data will be written in one request. If it's None, then it will be set to `self._chunk_size` Raises: InfluxDBWriteError: if write failed \"\"\" # todo: add more errors raise if isinstance ( data , DataFrame ): assert ( measurement is not None ), \"measurement must be specified when data is a DataFrame\" if tag_keys : assert set ( tag_keys ) in set ( data . columns . tolist () ), \"tag_keys must be in data.columns\" serializer = DataframeSerializer ( data , measurement , time_key , tag_keys , global_tags , precision = self . _precision , ) if chunk_size == - 1 : chunk_size = len ( data ) for lines in serializer . serialize ( chunk_size or self . _chunk_size ): await self . write ( lines ) elif isinstance ( data , np . ndarray ): assert ( measurement is not None ), \"measurement must be specified when data is a numpy array\" assert ( time_key is not None ), \"time_key must be specified when data is a numpy array\" serializer = NumpySerializer ( data , measurement , time_key , tag_keys , global_tags , time_precision = self . _precision , ) if chunk_size == - 1 : chunk_size = len ( data ) for lines in serializer . serialize ( chunk_size or self . _chunk_size ): await self . write ( lines ) else : raise TypeError ( f \"data must be pandas.DataFrame, numpy array, got { type ( data ) } \" ) async def write ( self , line_protocol : str ): \"\"\"\u5c06line-protocol\u6570\u7ec4\u5199\u5165influxdb Args: line_protocol: \u5f85\u5199\u5165\u7684\u6570\u636e\uff0c\u4ee5line-protocol\u6570\u7ec4\u5f62\u5f0f\u5b58\u5728 \"\"\" # todo: add raise error declaration if self . _enable_compress : line_protocol_ = gzip . compress ( line_protocol . encode ( \"utf-8\" )) else : line_protocol_ = line_protocol async with ClientSession () as session : async with session . post ( self . _write_url , data = line_protocol_ , headers = self . _write_headers ) as resp : if resp . status != 204 : err = await resp . json () logger . 
warning ( \"influxdb write error when processing: %s , err code: %s , message: %s \" , { line_protocol [: 100 ]}, err [ \"code\" ], err [ \"message\" ], ) logger . debug ( \"data caused error: %s \" , line_protocol ) raise InfluxDBWriteError ( f \"influxdb write failed, err: { err [ 'message' ] } \" ) async def query ( self , flux : Union [ Flux , str ], deserializer : Callable = None ) -> Any : \"\"\"flux\u67e5\u8be2 flux\u67e5\u8be2\u7ed3\u679c\u662f\u4e00\u4e2a\u4ee5annotated csv\u683c\u5f0f\u5b58\u50a8\u7684\u6570\u636e\uff0c\u4f8b\u5982\uff1a ``` ,result,table,_time,code,amount,close,factor,high,low,open,volume ,_result,0,2019-01-01T00:00:00Z,000001.XSHE,100000000,5.15,1.23,5.2,5,5.1,1000000 ``` \u4e0a\u8ff0`result`\u4e2d\uff0c\u4e8b\u5148\u901a\u8fc7Flux.keep()\u9650\u5236\u4e86\u8fd4\u56de\u7684\u5b57\u6bb5\u4e3a_time,code,amount,close,factor,high,low,open,volume\u3002influxdb\u67e5\u8be2\u8fd4\u56de\u7ed3\u679c\u65f6\uff0c\u603b\u662f\u6309\u7167\u5b57\u6bb5\u540d\u79f0\u5347\u5e8f\u6392\u5217\u3002\u6b64\u5916\uff0c\u603b\u662f\u4f1a\u989d\u5916\u5730\u8fd4\u56de_result, table\u4e24\u4e2a\u5b57\u6bb5\u3002 \u5982\u679c\u4f20\u5165\u4e86deserializer\uff0c\u5219\u4f1a\u8c03\u7528deserializer\u5c06\u5176\u89e3\u6790\u6210\u4e3apython\u5bf9\u8c61\u3002\u5426\u5219\uff0c\u8fd4\u56debytes\u6570\u636e\u3002 Args: flux: flux\u67e5\u8be2\u8bed\u53e5 deserializer: \u53cd\u5e8f\u5217\u5316\u51fd\u6570 Returns: \u5982\u679c\u672a\u63d0\u4f9b\u53cd\u5e8f\u5217\u5316\u51fd\u6570\uff0c\u5219\u8fd4\u56de\u7ed3\u679c\u4e3abytes array(\u5982\u679c\u6307\u5b9a\u4e86compress=True\uff0c\u8fd4\u56de\u7ed3\u679c\u4e3agzip\u89e3\u538b\u7f29\u540e\u7684bytes array)\uff0c\u5426\u5219\u8fd4\u56de\u53cd\u5e8f\u5217\u5316\u540e\u7684python\u5bf9\u8c61 \"\"\" if isinstance ( flux , Flux ): flux = str ( flux ) async with ClientSession () as session : async with session . post ( self . _query_url , data = flux , headers = self . _query_headers ) as resp : if resp . status != 200 : err = await resp . json () logger . warning ( f \"influxdb query error: { err } when processing { flux [: 500 ] } \" ) logger . debug ( \"data caused error: %s \" , flux ) raise InfluxDBQueryError ( f \"influxdb query failed, status code: { err [ 'message' ] } \" ) else : # auto-unzip body = await resp . read () if deserializer : try : return deserializer ( body ) except Exception as e : logger . exception ( e ) logger . warning ( \"failed to deserialize data: %s , the query is: %s \" , body , flux [: 500 ], ) raise else : return body async def drop_measurement ( self , measurement : str ): \"\"\"\u4eceinfluxdb\u4e2d\u5220\u9664\u4e00\u4e2ameasurement \u8c03\u7528\u6b64\u65b9\u6cd5\u540e\uff0c\u5b9e\u9645\u4e0a\u8be5measurement\u4ecd\u7136\u5b58\u5728\uff0c\u53ea\u662f\u6ca1\u6709\u6570\u636e\u3002 \"\"\" # todo: add raise error declaration await self . delete ( measurement , arrow . now () . naive ) async def delete ( self , measurement : str , stop : datetime . datetime , tags : Optional [ Dict [ str , str ]] = {}, start : datetime . 
datetime = None , precision : str = \"s\" , ): \"\"\"\u5220\u9664influxdb\u4e2d\u6307\u5b9a\u65f6\u95f4\u6bb5\u5185\u7684\u6570\u636e \u5173\u4e8e\u53c2\u6570\uff0c\u8bf7\u53c2\u89c1[Flux.delete][omicron.dal.influx.flux.Flux.delete]\u3002 Args: measurement: \u6307\u5b9ameasurement\u540d\u5b57 stop: \u5f85\u5220\u9664\u8bb0\u5f55\u7684\u7ed3\u675f\u65f6\u95f4 start: \u5f85\u5220\u9664\u8bb0\u5f55\u7684\u5f00\u59cb\u65f6\u95f4\uff0c\u5982\u679c\u672a\u6307\u5b9a\uff0c\u5219\u4f7f\u7528EPOCH_START tags: \u6309tag\u8fdb\u884c\u8fc7\u6ee4\u7684\u6761\u4ef6 precision: \u7528\u4ee5\u683c\u5f0f\u5316\u8d77\u59cb\u548c\u7ed3\u675f\u65f6\u95f4\u3002 Raises: InfluxDeleteError: \u5982\u679c\u5220\u9664\u5931\u8d25\uff0c\u5219\u629b\u51fa\u6b64\u5f02\u5e38 \"\"\" # todo: add raise error declaration command = Flux () . delete ( measurement , stop , tags , start = start , precision = precision ) async with ClientSession () as session : async with session . post ( self . _delete_url , data = json . dumps ( command ), headers = self . _delete_headers ) as resp : if resp . status != 204 : err = await resp . json () logger . warning ( \"influxdb delete error: %s when processin command %s \" , err [ \"message\" ], command , ) raise InfluxDeleteError ( f \"influxdb delete failed, status code: { err [ 'message' ] } \" ) async def list_buckets ( self ) -> List [ Dict ]: \"\"\"\u5217\u51fainfluxdb\u4e2d\u5bf9\u5e94token\u80fd\u770b\u5230\u7684\u6240\u6709\u7684bucket Returns: list of buckets, each bucket is a dict with keys: ``` id orgID, a 16 bytes hex string type, system or user description name retentionRules createdAt updatedAt links labels ``` \"\"\" url = f \" { self . _url } /api/v2/buckets\" headers = { \"Authorization\" : f \"Token { self . _token } \" } async with ClientSession () as session : async with session . get ( url , headers = headers ) as resp : if resp . status != 200 : err = await resp . json () raise InfluxSchemaError ( f \"influxdb list bucket failed, status code: { err [ 'message' ] } \" ) else : return ( await resp . json ())[ \"buckets\" ] async def delete_bucket ( self , bucket_id : str = None ): \"\"\"\u5220\u9664influxdb\u4e2d\u6307\u5b9abucket Args: bucket_id: \u6307\u5b9abucket\u7684id\u3002\u5982\u679c\u4e3aNone\uff0c\u5219\u4f1a\u5220\u9664\u672cclient\u5bf9\u5e94\u7684bucket\u3002 \"\"\" if bucket_id is None : buckets = await self . list_buckets () for bucket in buckets : if bucket [ \"type\" ] == \"user\" and bucket [ \"name\" ] == self . _bucket : bucket_id = bucket [ \"id\" ] break else : raise BadParameterError ( \"bucket_id is None, and we can't find bucket with name: %s \" % self . _bucket ) url = f \" { self . _url } /api/v2/buckets/ { bucket_id } \" headers = { \"Authorization\" : f \"Token { self . _token } \" } async with ClientSession () as session : async with session . delete ( url , headers = headers ) as resp : if resp . status != 204 : err = await resp . json () logger . 
warning ( \"influxdb delete bucket error: %s when processin command %s \" , err [ \"message\" ], bucket_id , ) raise InfluxSchemaError ( f \"influxdb delete bucket failed, status code: { err [ 'message' ] } \" ) async def create_bucket ( self , description = None , retention_rules : List [ Dict ] = None , org_id : str = None ) -> str : \"\"\"\u521b\u5efainfluxdb\u4e2d\u6307\u5b9abucket Args: description: \u6307\u5b9abucket\u7684\u63cf\u8ff0 org_id: \u6307\u5b9abucket\u6240\u5c5e\u7684\u7ec4\u7ec7id\uff0c\u5982\u679c\u672a\u6307\u5b9a\uff0c\u5219\u4f7f\u7528\u672cclient\u5bf9\u5e94\u7684\u7ec4\u7ec7id\u3002 Raises: InfluxSchemaError: \u5f53influxdb\u8fd4\u56de\u9519\u8bef\u65f6\uff0c\u6bd4\u5982\u91cd\u590d\u521b\u5efabucket\u7b49\uff0c\u4f1a\u629b\u51fa\u6b64\u5f02\u5e38 Returns: \u65b0\u521b\u5efa\u7684bucket\u7684id \"\"\" if org_id is None : org_id = await self . query_org_id () url = f \" { self . _url } /api/v2/buckets\" headers = { \"Authorization\" : f \"Token { self . _token } \" } data = { \"name\" : self . _bucket , \"orgID\" : org_id , \"description\" : description , \"retentionRules\" : retention_rules , } async with ClientSession () as session : async with session . post ( url , data = json . dumps ( data ), headers = headers ) as resp : if resp . status != 201 : err = await resp . json () logger . warning ( \"influxdb create bucket error: %s when processin command %s \" , err [ \"message\" ], data , ) raise InfluxSchemaError ( f \"influxdb create bucket failed, status code: { err [ 'message' ] } \" ) else : result = await resp . json () return result [ \"id\" ] async def list_organizations ( self , offset : int = 0 , limit : int = 100 ) -> List [ Dict ]: \"\"\"\u5217\u51fa\u672c\u5ba2\u6237\u7aef\u5141\u8bb8\u67e5\u8be2\u7684\u6240\u7ec4\u7ec7 Args: offset : \u5206\u9875\u8d77\u70b9 limit : \u6bcf\u9875size Raises: InfluxSchemaError: influxdb\u8fd4\u56de\u7684\u9519\u8bef Returns: list of organizations, each organization is a dict with keys: ``` id : the id of the org links name : the name of the org description createdAt updatedAt ``` \"\"\" url = f \" { self . _url } /api/v2/orgs?offset= { offset } &limit= { limit } \" headers = { \"Authorization\" : f \"Token { self . _token } \" } async with ClientSession () as session : async with session . get ( url , headers = headers ) as resp : if resp . status != 200 : err = await resp . json () logger . warning ( \"influxdb query orgs err: %s \" , err [ \"message\" ]) raise InfluxSchemaError ( f \"influxdb query orgs failed, status code: { err [ 'message' ] } \" ) else : return ( await resp . json ())[ \"orgs\" ] async def query_org_id ( self , name : str = None ) -> str : \"\"\"\u901a\u8fc7\u7ec4\u7ec7\u540d\u67e5\u627e\u7ec4\u7ec7id \u53ea\u80fd\u67e5\u7684\u672c\u5ba2\u6237\u7aef\u5141\u8bb8\u67e5\u8be2\u7684\u7ec4\u7ec7\u3002\u5982\u679cname\u672a\u63d0\u4f9b\uff0c\u5219\u4f7f\u7528\u672c\u5ba2\u6237\u7aef\u521b\u5efa\u65f6\u4f20\u5165\u7684\u7ec4\u7ec7\u540d\u3002 Args: name: \u6307\u5b9a\u7ec4\u7ec7\u540d Returns: \u7ec4\u7ec7id \"\"\" if name is None : name = self . _org orgs = await self . 
list_organizations () for org in orgs : if org [ \"name\" ] == name : return org [ \"id\" ] raise BadParameterError ( f \"can't find org with name: { name } \" )","title":"InfluxClient - the performanct async client for influxdb"},{"location":"api/dal/influxclient/#omicron.dal.influx.influxclient.InfluxClient.__init__","text":"[summary] Parameters: Name Type Description Default url [type] [description] required token [type] [description] required bucket [type] [description] required org [type] [description]. Defaults to None. None enable_compress [type] [description]. Defaults to False. False chunk_size int number of lines to be saved in one request 5000 precision str \u652f\u6301\u7684\u65f6\u95f4\u7cbe\u5ea6 's' Source code in omicron/dal/influx/influxclient.py def __init__ ( self , url : str , token : str , bucket : str , org : str = None , enable_compress = False , chunk_size : int = 5000 , precision : str = \"s\" , ): \"\"\"[summary] Args: url ([type]): [description] token ([type]): [description] bucket ([type]): [description] org ([type], optional): [description]. Defaults to None. enable_compress ([type], optional): [description]. Defaults to False. chunk_size: number of lines to be saved in one request precision: \u652f\u6301\u7684\u65f6\u95f4\u7cbe\u5ea6 \"\"\" self . _url = url self . _bucket = bucket self . _enable_compress = enable_compress self . _org = org self . _org_id = None # \u9700\u8981\u65f6\u901a\u8fc7\u67e5\u8be2\u83b7\u53d6\uff0c\u6b64\u540e\u4e0d\u518d\u66f4\u65b0 self . _token = token # influxdb 2.0\u8d77\u652f\u6301\u7684\u65f6\u95f4\u7cbe\u5ea6\u6709\uff1ans, us, ms, s\u3002\u672c\u5ba2\u6237\u7aef\u53ea\u652f\u6301s, ms\u548cus self . _precision = precision . lower () if self . _precision not in [ \"s\" , \"ms\" , \"us\" ]: # pragma: no cover raise ValueError ( \"precision must be one of ['s', 'ms', 'us']\" ) self . _chunk_size = chunk_size # write self . _write_url = f \" { self . _url } /api/v2/write?org= { self . _org } &bucket= { self . _bucket } &precision= { self . _precision } \" self . _write_headers = { \"Content-Type\" : \"text/plain; charset=utf-8\" , \"Authorization\" : f \"Token { token } \" , \"Accept\" : \"application/json\" , } if self . _enable_compress : self . _write_headers [ \"Content-Encoding\" ] = \"gzip\" self . _query_url = f \" { self . _url } /api/v2/query?org= { self . _org } \" self . _query_headers = { \"Authorization\" : f \"Token { token } \" , \"Content-Type\" : \"application/vnd.flux\" , # influx\u67e5\u8be2\u7ed3\u679c\u683c\u5f0f\uff0c\u65e0\u8bba\u5982\u4f55\u6307\u5b9a\uff08\u6216\u8005\u4e0d\u6307\u5b9a\uff09\uff0c\u57282.1\u4e2d\u59cb\u7ec8\u662fcsv\u683c\u5f0f \"Accept\" : \"text/csv\" , } if self . _enable_compress : self . _query_headers [ \"Accept-Encoding\" ] = \"gzip\" self . _delete_url = ( f \" { self . _url } /api/v2/delete?org= { self . _org } &bucket= { self . _bucket } \" ) self . 
_delete_headers = { \"Authorization\" : f \"Token { token } \" , \"Content-Type\" : \"application/json\" , }","title":"__init__()"},{"location":"api/dal/influxclient/#omicron.dal.influx.influxclient.InfluxClient.create_bucket","text":"\u521b\u5efainfluxdb\u4e2d\u6307\u5b9abucket Parameters: Name Type Description Default description \u6307\u5b9abucket\u7684\u63cf\u8ff0 None org_id str \u6307\u5b9abucket\u6240\u5c5e\u7684\u7ec4\u7ec7id\uff0c\u5982\u679c\u672a\u6307\u5b9a\uff0c\u5219\u4f7f\u7528\u672cclient\u5bf9\u5e94\u7684\u7ec4\u7ec7id\u3002 None Exceptions: Type Description InfluxSchemaError \u5f53influxdb\u8fd4\u56de\u9519\u8bef\u65f6\uff0c\u6bd4\u5982\u91cd\u590d\u521b\u5efabucket\u7b49\uff0c\u4f1a\u629b\u51fa\u6b64\u5f02\u5e38 Returns: Type Description str \u65b0\u521b\u5efa\u7684bucket\u7684id Source code in omicron/dal/influx/influxclient.py async def create_bucket ( self , description = None , retention_rules : List [ Dict ] = None , org_id : str = None ) -> str : \"\"\"\u521b\u5efainfluxdb\u4e2d\u6307\u5b9abucket Args: description: \u6307\u5b9abucket\u7684\u63cf\u8ff0 org_id: \u6307\u5b9abucket\u6240\u5c5e\u7684\u7ec4\u7ec7id\uff0c\u5982\u679c\u672a\u6307\u5b9a\uff0c\u5219\u4f7f\u7528\u672cclient\u5bf9\u5e94\u7684\u7ec4\u7ec7id\u3002 Raises: InfluxSchemaError: \u5f53influxdb\u8fd4\u56de\u9519\u8bef\u65f6\uff0c\u6bd4\u5982\u91cd\u590d\u521b\u5efabucket\u7b49\uff0c\u4f1a\u629b\u51fa\u6b64\u5f02\u5e38 Returns: \u65b0\u521b\u5efa\u7684bucket\u7684id \"\"\" if org_id is None : org_id = await self . query_org_id () url = f \" { self . _url } /api/v2/buckets\" headers = { \"Authorization\" : f \"Token { self . _token } \" } data = { \"name\" : self . _bucket , \"orgID\" : org_id , \"description\" : description , \"retentionRules\" : retention_rules , } async with ClientSession () as session : async with session . post ( url , data = json . dumps ( data ), headers = headers ) as resp : if resp . status != 201 : err = await resp . json () logger . warning ( \"influxdb create bucket error: %s when processin command %s \" , err [ \"message\" ], data , ) raise InfluxSchemaError ( f \"influxdb create bucket failed, status code: { err [ 'message' ] } \" ) else : result = await resp . json () return result [ \"id\" ]","title":"create_bucket()"},{"location":"api/dal/influxclient/#omicron.dal.influx.influxclient.InfluxClient.delete","text":"\u5220\u9664influxdb\u4e2d\u6307\u5b9a\u65f6\u95f4\u6bb5\u5185\u7684\u6570\u636e \u5173\u4e8e\u53c2\u6570\uff0c\u8bf7\u53c2\u89c1 Flux.delete \u3002 Parameters: Name Type Description Default measurement str \u6307\u5b9ameasurement\u540d\u5b57 required stop datetime \u5f85\u5220\u9664\u8bb0\u5f55\u7684\u7ed3\u675f\u65f6\u95f4 required start datetime \u5f85\u5220\u9664\u8bb0\u5f55\u7684\u5f00\u59cb\u65f6\u95f4\uff0c\u5982\u679c\u672a\u6307\u5b9a\uff0c\u5219\u4f7f\u7528EPOCH_START None tags Optional[Dict[str, str]] \u6309tag\u8fdb\u884c\u8fc7\u6ee4\u7684\u6761\u4ef6 {} precision str \u7528\u4ee5\u683c\u5f0f\u5316\u8d77\u59cb\u548c\u7ed3\u675f\u65f6\u95f4\u3002 's' Exceptions: Type Description InfluxDeleteError \u5982\u679c\u5220\u9664\u5931\u8d25\uff0c\u5219\u629b\u51fa\u6b64\u5f02\u5e38 Source code in omicron/dal/influx/influxclient.py async def delete ( self , measurement : str , stop : datetime . datetime , tags : Optional [ Dict [ str , str ]] = {}, start : datetime . 
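A hedged sketch of the constructor options documented in the `__init__()` section above; the URL and token are placeholders.

```python
from omicron.dal.influx.influxclient import InfluxClient

# gzip request bodies and query responses, use millisecond timestamps,
# and write at most 1000 line-protocol lines per request
client = InfluxClient(
    "http://localhost:8086",
    token="my-token",
    bucket="zillionare",
    org="my-org",
    enable_compress=True,
    precision="ms",
    chunk_size=1000,
)
```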
datetime = None , precision : str = \"s\" , ): \"\"\"\u5220\u9664influxdb\u4e2d\u6307\u5b9a\u65f6\u95f4\u6bb5\u5185\u7684\u6570\u636e \u5173\u4e8e\u53c2\u6570\uff0c\u8bf7\u53c2\u89c1[Flux.delete][omicron.dal.influx.flux.Flux.delete]\u3002 Args: measurement: \u6307\u5b9ameasurement\u540d\u5b57 stop: \u5f85\u5220\u9664\u8bb0\u5f55\u7684\u7ed3\u675f\u65f6\u95f4 start: \u5f85\u5220\u9664\u8bb0\u5f55\u7684\u5f00\u59cb\u65f6\u95f4\uff0c\u5982\u679c\u672a\u6307\u5b9a\uff0c\u5219\u4f7f\u7528EPOCH_START tags: \u6309tag\u8fdb\u884c\u8fc7\u6ee4\u7684\u6761\u4ef6 precision: \u7528\u4ee5\u683c\u5f0f\u5316\u8d77\u59cb\u548c\u7ed3\u675f\u65f6\u95f4\u3002 Raises: InfluxDeleteError: \u5982\u679c\u5220\u9664\u5931\u8d25\uff0c\u5219\u629b\u51fa\u6b64\u5f02\u5e38 \"\"\" # todo: add raise error declaration command = Flux () . delete ( measurement , stop , tags , start = start , precision = precision ) async with ClientSession () as session : async with session . post ( self . _delete_url , data = json . dumps ( command ), headers = self . _delete_headers ) as resp : if resp . status != 204 : err = await resp . json () logger . warning ( \"influxdb delete error: %s when processin command %s \" , err [ \"message\" ], command , ) raise InfluxDeleteError ( f \"influxdb delete failed, status code: { err [ 'message' ] } \" )","title":"delete()"},{"location":"api/dal/influxclient/#omicron.dal.influx.influxclient.InfluxClient.delete_bucket","text":"\u5220\u9664influxdb\u4e2d\u6307\u5b9abucket Parameters: Name Type Description Default bucket_id str \u6307\u5b9abucket\u7684id\u3002\u5982\u679c\u4e3aNone\uff0c\u5219\u4f1a\u5220\u9664\u672cclient\u5bf9\u5e94\u7684bucket\u3002 None Source code in omicron/dal/influx/influxclient.py async def delete_bucket ( self , bucket_id : str = None ): \"\"\"\u5220\u9664influxdb\u4e2d\u6307\u5b9abucket Args: bucket_id: \u6307\u5b9abucket\u7684id\u3002\u5982\u679c\u4e3aNone\uff0c\u5219\u4f1a\u5220\u9664\u672cclient\u5bf9\u5e94\u7684bucket\u3002 \"\"\" if bucket_id is None : buckets = await self . list_buckets () for bucket in buckets : if bucket [ \"type\" ] == \"user\" and bucket [ \"name\" ] == self . _bucket : bucket_id = bucket [ \"id\" ] break else : raise BadParameterError ( \"bucket_id is None, and we can't find bucket with name: %s \" % self . _bucket ) url = f \" { self . _url } /api/v2/buckets/ { bucket_id } \" headers = { \"Authorization\" : f \"Token { self . _token } \" } async with ClientSession () as session : async with session . delete ( url , headers = headers ) as resp : if resp . status != 204 : err = await resp . json () logger . warning ( \"influxdb delete bucket error: %s when processin command %s \" , err [ \"message\" ], bucket_id , ) raise InfluxSchemaError ( f \"influxdb delete bucket failed, status code: { err [ 'message' ] } \" )","title":"delete_bucket()"},{"location":"api/dal/influxclient/#omicron.dal.influx.influxclient.InfluxClient.drop_measurement","text":"\u4eceinfluxdb\u4e2d\u5220\u9664\u4e00\u4e2ameasurement \u8c03\u7528\u6b64\u65b9\u6cd5\u540e\uff0c\u5b9e\u9645\u4e0a\u8be5measurement\u4ecd\u7136\u5b58\u5728\uff0c\u53ea\u662f\u6ca1\u6709\u6570\u636e\u3002 Source code in omicron/dal/influx/influxclient.py async def drop_measurement ( self , measurement : str ): \"\"\"\u4eceinfluxdb\u4e2d\u5220\u9664\u4e00\u4e2ameasurement \u8c03\u7528\u6b64\u65b9\u6cd5\u540e\uff0c\u5b9e\u9645\u4e0a\u8be5measurement\u4ecd\u7136\u5b58\u5728\uff0c\u53ea\u662f\u6ca1\u6709\u6570\u636e\u3002 \"\"\" # todo: add raise error declaration await self . 
delete ( measurement , arrow . now () . naive )","title":"drop_measurement()"},{"location":"api/dal/influxclient/#omicron.dal.influx.influxclient.InfluxClient.list_buckets","text":"\u5217\u51fainfluxdb\u4e2d\u5bf9\u5e94token\u80fd\u770b\u5230\u7684\u6240\u6709\u7684bucket Returns: Type Description list of buckets, each bucket is a dict with keys ``` id orgID, a 16 bytes hex string type, system or user description name retentionRules createdAt updatedAt links labels ``` Source code in omicron/dal/influx/influxclient.py async def list_buckets ( self ) -> List [ Dict ]: \"\"\"\u5217\u51fainfluxdb\u4e2d\u5bf9\u5e94token\u80fd\u770b\u5230\u7684\u6240\u6709\u7684bucket Returns: list of buckets, each bucket is a dict with keys: ``` id orgID, a 16 bytes hex string type, system or user description name retentionRules createdAt updatedAt links labels ``` \"\"\" url = f \" { self . _url } /api/v2/buckets\" headers = { \"Authorization\" : f \"Token { self . _token } \" } async with ClientSession () as session : async with session . get ( url , headers = headers ) as resp : if resp . status != 200 : err = await resp . json () raise InfluxSchemaError ( f \"influxdb list bucket failed, status code: { err [ 'message' ] } \" ) else : return ( await resp . json ())[ \"buckets\" ]","title":"list_buckets()"},{"location":"api/dal/influxclient/#omicron.dal.influx.influxclient.InfluxClient.list_organizations","text":"\u5217\u51fa\u672c\u5ba2\u6237\u7aef\u5141\u8bb8\u67e5\u8be2\u7684\u6240\u7ec4\u7ec7 Parameters: Name Type Description Default offset \u5206\u9875\u8d77\u70b9 0 limit \u6bcf\u9875size 100 Exceptions: Type Description InfluxSchemaError influxdb\u8fd4\u56de\u7684\u9519\u8bef Returns: Type Description list of organizations, each organization is a dict with keys 1 2 3 4 5 6 id : the id of the org links name : the name of the org description createdAt updatedAt Source code in omicron/dal/influx/influxclient.py async def list_organizations ( self , offset : int = 0 , limit : int = 100 ) -> List [ Dict ]: \"\"\"\u5217\u51fa\u672c\u5ba2\u6237\u7aef\u5141\u8bb8\u67e5\u8be2\u7684\u6240\u7ec4\u7ec7 Args: offset : \u5206\u9875\u8d77\u70b9 limit : \u6bcf\u9875size Raises: InfluxSchemaError: influxdb\u8fd4\u56de\u7684\u9519\u8bef Returns: list of organizations, each organization is a dict with keys: ``` id : the id of the org links name : the name of the org description createdAt updatedAt ``` \"\"\" url = f \" { self . _url } /api/v2/orgs?offset= { offset } &limit= { limit } \" headers = { \"Authorization\" : f \"Token { self . _token } \" } async with ClientSession () as session : async with session . get ( url , headers = headers ) as resp : if resp . status != 200 : err = await resp . json () logger . warning ( \"influxdb query orgs err: %s \" , err [ \"message\" ]) raise InfluxSchemaError ( f \"influxdb query orgs failed, status code: { err [ 'message' ] } \" ) else : return ( await resp . 
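A short sketch of the deletion calls documented above; the measurement name and tag value are placeholders.

```python
import datetime

import arrow

# remove one stock's records in a given window; start defaults to EPOCH_START when omitted
await client.delete(
    "stock_bars_1d",
    stop=arrow.now().naive,
    tags={"code": "000001.XSHE"},
    start=datetime.datetime(2019, 1, 1),
)

# drop_measurement() deletes everything up to now; the measurement still exists, just holds no data
await client.drop_measurement("stock_bars_1d")
```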
json ())[ \"orgs\" ]","title":"list_organizations()"},{"location":"api/dal/influxclient/#omicron.dal.influx.influxclient.InfluxClient.query","text":"flux\u67e5\u8be2 flux\u67e5\u8be2\u7ed3\u679c\u662f\u4e00\u4e2a\u4ee5annotated csv\u683c\u5f0f\u5b58\u50a8\u7684\u6570\u636e\uff0c\u4f8b\u5982\uff1a 1 2 ,result,table,_time,code,amount,close,factor,high,low,open,volume ,_result,0,2019-01-01T00:00:00Z,000001.XSHE,100000000,5.15,1.23,5.2,5,5.1,1000000 \u4e0a\u8ff0 result \u4e2d\uff0c\u4e8b\u5148\u901a\u8fc7Flux.keep()\u9650\u5236\u4e86\u8fd4\u56de\u7684\u5b57\u6bb5\u4e3a_time,code,amount,close,factor,high,low,open,volume\u3002influxdb\u67e5\u8be2\u8fd4\u56de\u7ed3\u679c\u65f6\uff0c\u603b\u662f\u6309\u7167\u5b57\u6bb5\u540d\u79f0\u5347\u5e8f\u6392\u5217\u3002\u6b64\u5916\uff0c\u603b\u662f\u4f1a\u989d\u5916\u5730\u8fd4\u56de_result, table\u4e24\u4e2a\u5b57\u6bb5\u3002 \u5982\u679c\u4f20\u5165\u4e86deserializer\uff0c\u5219\u4f1a\u8c03\u7528deserializer\u5c06\u5176\u89e3\u6790\u6210\u4e3apython\u5bf9\u8c61\u3002\u5426\u5219\uff0c\u8fd4\u56debytes\u6570\u636e\u3002 Parameters: Name Type Description Default flux Union[omicron.dal.influx.flux.Flux, str] flux\u67e5\u8be2\u8bed\u53e5 required deserializer Callable \u53cd\u5e8f\u5217\u5316\u51fd\u6570 None Returns: Type Description Any \u5982\u679c\u672a\u63d0\u4f9b\u53cd\u5e8f\u5217\u5316\u51fd\u6570\uff0c\u5219\u8fd4\u56de\u7ed3\u679c\u4e3abytes array(\u5982\u679c\u6307\u5b9a\u4e86compress=True\uff0c\u8fd4\u56de\u7ed3\u679c\u4e3agzip\u89e3\u538b\u7f29\u540e\u7684bytes array)\uff0c\u5426\u5219\u8fd4\u56de\u53cd\u5e8f\u5217\u5316\u540e\u7684python\u5bf9\u8c61 Source code in omicron/dal/influx/influxclient.py async def query ( self , flux : Union [ Flux , str ], deserializer : Callable = None ) -> Any : \"\"\"flux\u67e5\u8be2 flux\u67e5\u8be2\u7ed3\u679c\u662f\u4e00\u4e2a\u4ee5annotated csv\u683c\u5f0f\u5b58\u50a8\u7684\u6570\u636e\uff0c\u4f8b\u5982\uff1a ``` ,result,table,_time,code,amount,close,factor,high,low,open,volume ,_result,0,2019-01-01T00:00:00Z,000001.XSHE,100000000,5.15,1.23,5.2,5,5.1,1000000 ``` \u4e0a\u8ff0`result`\u4e2d\uff0c\u4e8b\u5148\u901a\u8fc7Flux.keep()\u9650\u5236\u4e86\u8fd4\u56de\u7684\u5b57\u6bb5\u4e3a_time,code,amount,close,factor,high,low,open,volume\u3002influxdb\u67e5\u8be2\u8fd4\u56de\u7ed3\u679c\u65f6\uff0c\u603b\u662f\u6309\u7167\u5b57\u6bb5\u540d\u79f0\u5347\u5e8f\u6392\u5217\u3002\u6b64\u5916\uff0c\u603b\u662f\u4f1a\u989d\u5916\u5730\u8fd4\u56de_result, table\u4e24\u4e2a\u5b57\u6bb5\u3002 \u5982\u679c\u4f20\u5165\u4e86deserializer\uff0c\u5219\u4f1a\u8c03\u7528deserializer\u5c06\u5176\u89e3\u6790\u6210\u4e3apython\u5bf9\u8c61\u3002\u5426\u5219\uff0c\u8fd4\u56debytes\u6570\u636e\u3002 Args: flux: flux\u67e5\u8be2\u8bed\u53e5 deserializer: \u53cd\u5e8f\u5217\u5316\u51fd\u6570 Returns: \u5982\u679c\u672a\u63d0\u4f9b\u53cd\u5e8f\u5217\u5316\u51fd\u6570\uff0c\u5219\u8fd4\u56de\u7ed3\u679c\u4e3abytes array(\u5982\u679c\u6307\u5b9a\u4e86compress=True\uff0c\u8fd4\u56de\u7ed3\u679c\u4e3agzip\u89e3\u538b\u7f29\u540e\u7684bytes array)\uff0c\u5426\u5219\u8fd4\u56de\u53cd\u5e8f\u5217\u5316\u540e\u7684python\u5bf9\u8c61 \"\"\" if isinstance ( flux , Flux ): flux = str ( flux ) async with ClientSession () as session : async with session . post ( self . _query_url , data = flux , headers = self . _query_headers ) as resp : if resp . status != 200 : err = await resp . json () logger . warning ( f \"influxdb query error: { err } when processing { flux [: 500 ] } \" ) logger . 
debug ( \"data caused error: %s \" , flux ) raise InfluxDBQueryError ( f \"influxdb query failed, status code: { err [ 'message' ] } \" ) else : # auto-unzip body = await resp . read () if deserializer : try : return deserializer ( body ) except Exception as e : logger . exception ( e ) logger . warning ( \"failed to deserialize data: %s , the query is: %s \" , body , flux [: 500 ], ) raise else : return body","title":"query()"},{"location":"api/dal/influxclient/#omicron.dal.influx.influxclient.InfluxClient.query_org_id","text":"\u901a\u8fc7\u7ec4\u7ec7\u540d\u67e5\u627e\u7ec4\u7ec7id \u53ea\u80fd\u67e5\u7684\u672c\u5ba2\u6237\u7aef\u5141\u8bb8\u67e5\u8be2\u7684\u7ec4\u7ec7\u3002\u5982\u679cname\u672a\u63d0\u4f9b\uff0c\u5219\u4f7f\u7528\u672c\u5ba2\u6237\u7aef\u521b\u5efa\u65f6\u4f20\u5165\u7684\u7ec4\u7ec7\u540d\u3002 Parameters: Name Type Description Default name str \u6307\u5b9a\u7ec4\u7ec7\u540d None Returns: Type Description str \u7ec4\u7ec7id Source code in omicron/dal/influx/influxclient.py async def query_org_id ( self , name : str = None ) -> str : \"\"\"\u901a\u8fc7\u7ec4\u7ec7\u540d\u67e5\u627e\u7ec4\u7ec7id \u53ea\u80fd\u67e5\u7684\u672c\u5ba2\u6237\u7aef\u5141\u8bb8\u67e5\u8be2\u7684\u7ec4\u7ec7\u3002\u5982\u679cname\u672a\u63d0\u4f9b\uff0c\u5219\u4f7f\u7528\u672c\u5ba2\u6237\u7aef\u521b\u5efa\u65f6\u4f20\u5165\u7684\u7ec4\u7ec7\u540d\u3002 Args: name: \u6307\u5b9a\u7ec4\u7ec7\u540d Returns: \u7ec4\u7ec7id \"\"\" if name is None : name = self . _org orgs = await self . list_organizations () for org in orgs : if org [ \"name\" ] == name : return org [ \"id\" ] raise BadParameterError ( f \"can't find org with name: { name } \" )","title":"query_org_id()"},{"location":"api/dal/influxclient/#omicron.dal.influx.influxclient.InfluxClient.save","text":"save data into influxdb if data is a pandas.DataFrame or numy structured array, it will be converted to line protocol and saved. If data is str, use write method instead. Parameters: Name Type Description Default data Union[numpy.ndarray, pandas.core.frame.DataFrame] data to be saved required measurement str the name of measurement None tag_keys List[str] which columns name will be used as tags [] chunk_size int number of lines to be saved in one request. if it's -1, then all data will be written in one request. If it's None, then it will be set to self._chunk_size None Exceptions: Type Description InfluxDBWriteError if write failed Source code in omicron/dal/influx/influxclient.py async def save ( self , data : Union [ np . ndarray , DataFrame ], measurement : str = None , tag_keys : List [ str ] = [], time_key : str = None , global_tags : Dict = {}, chunk_size : int = None , ) -> None : \"\"\"save `data` into influxdb if `data` is a pandas.DataFrame or numy structured array, it will be converted to line protocol and saved. If `data` is str, use `write` method instead. Args: data: data to be saved measurement: the name of measurement tag_keys: which columns name will be used as tags chunk_size: number of lines to be saved in one request. if it's -1, then all data will be written in one request. If it's None, then it will be set to `self._chunk_size` Raises: InfluxDBWriteError: if write failed \"\"\" # todo: add more errors raise if isinstance ( data , DataFrame ): assert ( measurement is not None ), \"measurement must be specified when data is a DataFrame\" if tag_keys : assert set ( tag_keys ) in set ( data . columns . 
tolist () ), \"tag_keys must be in data.columns\" serializer = DataframeSerializer ( data , measurement , time_key , tag_keys , global_tags , precision = self . _precision , ) if chunk_size == - 1 : chunk_size = len ( data ) for lines in serializer . serialize ( chunk_size or self . _chunk_size ): await self . write ( lines ) elif isinstance ( data , np . ndarray ): assert ( measurement is not None ), \"measurement must be specified when data is a numpy array\" assert ( time_key is not None ), \"time_key must be specified when data is a numpy array\" serializer = NumpySerializer ( data , measurement , time_key , tag_keys , global_tags , time_precision = self . _precision , ) if chunk_size == - 1 : chunk_size = len ( data ) for lines in serializer . serialize ( chunk_size or self . _chunk_size ): await self . write ( lines ) else : raise TypeError ( f \"data must be pandas.DataFrame, numpy array, got { type ( data ) } \" )","title":"save()"},{"location":"api/dal/influxclient/#omicron.dal.influx.influxclient.InfluxClient.write","text":"\u5c06line-protocol\u6570\u7ec4\u5199\u5165influxdb Parameters: Name Type Description Default line_protocol str \u5f85\u5199\u5165\u7684\u6570\u636e\uff0c\u4ee5line-protocol\u6570\u7ec4\u5f62\u5f0f\u5b58\u5728 required Source code in omicron/dal/influx/influxclient.py async def write ( self , line_protocol : str ): \"\"\"\u5c06line-protocol\u6570\u7ec4\u5199\u5165influxdb Args: line_protocol: \u5f85\u5199\u5165\u7684\u6570\u636e\uff0c\u4ee5line-protocol\u6570\u7ec4\u5f62\u5f0f\u5b58\u5728 \"\"\" # todo: add raise error declaration if self . _enable_compress : line_protocol_ = gzip . compress ( line_protocol . encode ( \"utf-8\" )) else : line_protocol_ = line_protocol async with ClientSession () as session : async with session . post ( self . _write_url , data = line_protocol_ , headers = self . _write_headers ) as resp : if resp . status != 204 : err = await resp . json () logger . warning ( \"influxdb write error when processing: %s , err code: %s , message: %s \" , { line_protocol [: 100 ]}, err [ \"code\" ], err [ \"message\" ], ) logger . debug ( \"data caused error: %s \" , line_protocol ) raise InfluxDBWriteError ( f \"influxdb write failed, err: { err [ 'message' ] } \" )","title":"write()"},{"location":"api/dal/serialize/","text":"Serializer and Deserializer \u00b6 DataFrameDeserializer \u00b6 Source code in omicron/dal/influx/serialize.py class DataframeDeserializer ( Serializer ): def __init__ ( self , sort_values : Union [ str , List [ str ]] = None , encoding : str = \"utf-8\" , names : List [ str ] = None , usecols : Union [ List [ int ], List [ str ]] = None , dtype : dict = None , time_col : Union [ int , str ] = None , sep : str = \",\" , header : Union [ int , List [ int ], str ] = \"infer\" , engine : str = None , infer_datetime_format = True , lineterminator : str = None , converters : dict = None , skipfooter = 0 , index_col : Union [ int , str , List [ int ], List [ str ], bool ] = None , skiprows : Union [ int , List [ int ], Callable ] = None , ** kwargs , ): \"\"\"constructor a deserializer which convert a csv-like bytes array to pandas.DataFrame the args are the same as pandas.read_csv. for details, please refer to the official doc: [pandas.read_csv](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.read_csv.html) for performance consideration, please specify the following args: - engine = 'c' or 'pyarrow' when possible. Be noticed that 'pyarrow' is the fastest (multi-threaded supported) but may be error-prone. 
Only use it when you have thoroughly tested. - specify dtype when possible use `usecols` to specify the columns to read, and `names` to specify the column names (i.e., rename the columns), otherwise, the column names will be inferred from the first line. when `names` is specified, it has to be as same length as actual columns of the data. If this causes column renaming, then you should always use column name specified in `names` to access the data (instead of which in `usecols`). Examples: >>> data = \",result,table,_time,code,name\\\\r\\\\n,_result,0,2019-01-01T09:31:00Z,000002.XSHE,\u56fd\u8054\u8bc1\u5238\" >>> des = DataframeDeserializer(names=[\"_\", \"result\", \"table\", \"frame\", \"code\", \"name\"], usecols=[\"frame\", \"code\", \"name\"]) >>> des(data) frame code name 0 2019-01-01T09:31:00Z 000002.XSHE \u56fd\u8054\u8bc1\u5238 Args: sort_values: sort the dataframe by the specified columns encoding: if the data is bytes, then encoding is required, due to pandas.read_csv only handle string array sep: the separator/delimiter of each fields header: the row number of the header, default is 'infer' names: the column names of the dataframe index_col: the column number or name of the index column usecols: the column name of the columns to use dtype: the dtype of the columns engine: the engine of the csv file, default is None converters: specify converter for columns. skiprows: the row number to skip skipfooter: the row number to skip at the end of the file time_col: the columns to parse as dates infer_datetime_format: whether to infer the datetime format lineterminator: the line terminator of the csv file, only valid when engine is 'c' kwargs: other arguments \"\"\" self . sort_values = sort_values self . encoding = encoding self . sep = sep self . header = header self . names = names self . index_col = index_col self . usecols = usecols self . dtype = dtype self . engine = engine self . converters = converters or {} self . skiprows = skiprows self . skipfooter = skipfooter self . infer_datetime_format = infer_datetime_format self . lineterminator = lineterminator self . kwargs = kwargs if names is not None : self . header = 0 if time_col is not None : self . converters [ time_col ] = lambda x : ciso8601 . parse_datetime_as_naive ( x ) def __call__ ( self , data : Union [ str , bytes ]) -> pd . DataFrame : if isinstance ( data , str ): # treat data as string stream = io . StringIO ( data ) else : stream = io . StringIO ( data . decode ( self . encoding )) df = pd . read_csv ( stream , sep = self . sep , header = self . header , names = self . names , index_col = self . index_col , usecols = self . usecols , dtype = self . dtype , engine = self . engine , converters = self . converters , skiprows = self . skiprows , skipfooter = self . skipfooter , infer_datetime_format = self . infer_datetime_format , lineterminator = self . lineterminator , ** self . kwargs , ) if self . usecols : df = df [ list ( self . usecols )] if self . sort_values is not None : return df . sort_values ( self . sort_values ) else : return df __init__ ( self , sort_values = None , encoding = 'utf-8' , names = None , usecols = None , dtype = None , time_col = None , sep = ',' , header = 'infer' , engine = None , infer_datetime_format = True , lineterminator = None , converters = None , skipfooter = 0 , index_col = None , skiprows = None , ** kwargs ) special \u00b6 constructor a deserializer which convert a csv-like bytes array to pandas.DataFrame the args are the same as pandas.read_csv. 
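For the performance advice above, one possible construction looks like the sketch below; the column names and dtypes are assumptions about the data being deserialized, not requirements of the library.

```python
from omicron.dal.influx.serialize import DataframeDeserializer

# explicit per-column dtype plus the C engine, per the performance advice above
des = DataframeDeserializer(
    names=["_", "result", "table", "frame", "code", "close"],
    usecols=["frame", "code", "close"],
    dtype={"close": "float64"},
    time_col="frame",      # parsed via ciso8601, so no dtype entry for it
    engine="c",
    sort_values="frame",
)
```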
for details, please refer to the official doc: pandas.read_csv for performance consideration, please specify the following args: - engine = 'c' or 'pyarrow' when possible. Be noticed that 'pyarrow' is the fastest (multi-threaded supported) but may be error-prone. Only use it when you have thoroughly tested. 1 - specify dtype when possible use usecols to specify the columns to read, and names to specify the column names (i.e., rename the columns), otherwise, the column names will be inferred from the first line. when names is specified, it has to be as same length as actual columns of the data. If this causes column renaming, then you should always use column name specified in names to access the data (instead of which in usecols ). Examples: >>> data = \",result,table,_time,code,name \\r\\n ,_result,0,2019-01-01T09:31:00Z,000002.XSHE,\u56fd\u8054\u8bc1\u5238\" >>> des = DataframeDeserializer ( names = [ \"_\" , \"result\" , \"table\" , \"frame\" , \"code\" , \"name\" ], usecols = [ \"frame\" , \"code\" , \"name\" ]) >>> des ( data ) frame code name 0 2019 - 01 - 01 T09 : 31 : 00 Z 000002. XSHE \u56fd\u8054\u8bc1\u5238 Parameters: Name Type Description Default sort_values Union[str, List[str]] sort the dataframe by the specified columns None encoding str if the data is bytes, then encoding is required, due to pandas.read_csv only handle string array 'utf-8' sep str the separator/delimiter of each fields ',' header Union[int, List[int], str] the row number of the header, default is 'infer' 'infer' names List[str] the column names of the dataframe None index_col Union[int, str, List[int], List[str], bool] the column number or name of the index column None usecols Union[List[int], List[str]] the column name of the columns to use None dtype dict the dtype of the columns None engine str the engine of the csv file, default is None None converters dict specify converter for columns. None skiprows Union[int, List[int], Callable] the row number to skip None skipfooter the row number to skip at the end of the file 0 time_col Union[int, str] the columns to parse as dates None infer_datetime_format whether to infer the datetime format True lineterminator str the line terminator of the csv file, only valid when engine is 'c' None kwargs other arguments {} Source code in omicron/dal/influx/serialize.py def __init__ ( self , sort_values : Union [ str , List [ str ]] = None , encoding : str = \"utf-8\" , names : List [ str ] = None , usecols : Union [ List [ int ], List [ str ]] = None , dtype : dict = None , time_col : Union [ int , str ] = None , sep : str = \",\" , header : Union [ int , List [ int ], str ] = \"infer\" , engine : str = None , infer_datetime_format = True , lineterminator : str = None , converters : dict = None , skipfooter = 0 , index_col : Union [ int , str , List [ int ], List [ str ], bool ] = None , skiprows : Union [ int , List [ int ], Callable ] = None , ** kwargs , ): \"\"\"constructor a deserializer which convert a csv-like bytes array to pandas.DataFrame the args are the same as pandas.read_csv. for details, please refer to the official doc: [pandas.read_csv](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.read_csv.html) for performance consideration, please specify the following args: - engine = 'c' or 'pyarrow' when possible. Be noticed that 'pyarrow' is the fastest (multi-threaded supported) but may be error-prone. Only use it when you have thoroughly tested. 
- specify dtype when possible use `usecols` to specify the columns to read, and `names` to specify the column names (i.e., rename the columns), otherwise, the column names will be inferred from the first line. when `names` is specified, it has to be as same length as actual columns of the data. If this causes column renaming, then you should always use column name specified in `names` to access the data (instead of which in `usecols`). Examples: >>> data = \",result,table,_time,code,name\\\\r\\\\n,_result,0,2019-01-01T09:31:00Z,000002.XSHE,\u56fd\u8054\u8bc1\u5238\" >>> des = DataframeDeserializer(names=[\"_\", \"result\", \"table\", \"frame\", \"code\", \"name\"], usecols=[\"frame\", \"code\", \"name\"]) >>> des(data) frame code name 0 2019-01-01T09:31:00Z 000002.XSHE \u56fd\u8054\u8bc1\u5238 Args: sort_values: sort the dataframe by the specified columns encoding: if the data is bytes, then encoding is required, due to pandas.read_csv only handle string array sep: the separator/delimiter of each fields header: the row number of the header, default is 'infer' names: the column names of the dataframe index_col: the column number or name of the index column usecols: the column name of the columns to use dtype: the dtype of the columns engine: the engine of the csv file, default is None converters: specify converter for columns. skiprows: the row number to skip skipfooter: the row number to skip at the end of the file time_col: the columns to parse as dates infer_datetime_format: whether to infer the datetime format lineterminator: the line terminator of the csv file, only valid when engine is 'c' kwargs: other arguments \"\"\" self . sort_values = sort_values self . encoding = encoding self . sep = sep self . header = header self . names = names self . index_col = index_col self . usecols = usecols self . dtype = dtype self . engine = engine self . converters = converters or {} self . skiprows = skiprows self . skipfooter = skipfooter self . infer_datetime_format = infer_datetime_format self . lineterminator = lineterminator self . kwargs = kwargs if names is not None : self . header = 0 if time_col is not None : self . converters [ time_col ] = lambda x : ciso8601 . parse_datetime_as_naive ( x ) NumpyDeserializer \u00b6 Source code in omicron/dal/influx/serialize.py class NumpyDeserializer ( Serializer ): def __init__ ( self , dtype : List [ tuple ] = \"float\" , sort_values : Union [ str , List [ str ]] = None , use_cols : Union [ List [ str ], List [ int ]] = None , parse_date : Union [ int , str ] = \"_time\" , sep : str = \",\" , encoding : str = \"utf-8\" , skip_rows : Union [ int , List [ int ]] = 1 , header_line : int = 1 , comments : str = \"#\" , converters : Mapping [ int , Callable ] = None , ): \"\"\"construct a deserializer, which will convert a csv like multiline string/bytes array to a numpy array the data to be deserialized will be first split into array of fields, then use use_cols to select which fields to use, and re-order them by the order of use_cols. After that, the fields will be converted to numpy array and converted into dtype. by default dtype is float, which means the data will be converted to float. If you need to convert to a numpy structured array, then you can specify the dtype as a list of tuples, e.g. ``` dtype = [('col_1', 'datetime64[s]'), ('col_2', ' 1 : assert all ( [ isinstance ( x , int ) for x in self . converters . keys ()] ), \"converters must be a dict of column index to converter function, if there's no header\" self . 
_parsed_headers = None def _parse_header_once ( self , stream ): \"\"\"parse header and convert use_cols, if columns is specified in string. And if parse_date is required, add it into converters Args: stream : [description] Raises: SerializationError: [description] \"\"\" if self . header_line is None or self . _parsed_headers is not None : return try : line = stream . readlines ( self . header_line )[ - 1 ] cols = line . strip () . split ( self . sep ) self . _parsed_headers = cols use_cols = self . use_cols if use_cols is not None and isinstance ( use_cols [ 0 ], str ): self . use_cols = [ cols . index ( col ) for col in self . use_cols ] # convert keys of converters to int converters = { cols . index ( k ): v for k , v in self . converters . items ()} self . converters = converters if isinstance ( self . parse_date , str ): parse_date = cols . index ( self . parse_date ) if parse_date in self . converters . keys (): logger . debug ( \"specify duplicated converter in both parse_date and converters for col %s , use converters.\" , self . parse_date , ) else : # \u589e\u52a0parse_date\u5230converters self . converters [ parse_date ] = lambda x : ciso8601 . parse_datetime_as_naive ( x ) stream . seek ( 0 ) except ( IndexError , ValueError ): if line . strip () == \"\" : content = \"\" . join ( stream . readlines ()) . strip () if len ( content ) > 0 : raise SerializationError ( f \"specified heder line { self . header_line } is empty\" ) else : raise EmptyResult () else : raise SerializationError ( f \"bad header[ { self . header_line } ]: { line } \" ) def __call__ ( self , data : bytes ) -> np . ndarray : if self . encoding and isinstance ( data , bytes ): stream = io . StringIO ( data . decode ( self . encoding )) else : stream = io . StringIO ( data ) try : self . _parse_header_once ( stream ) except EmptyResult : return np . empty (( 0 ,), dtype = self . dtype ) arr = np . loadtxt ( stream . readlines (), delimiter = self . sep , skiprows = self . skip_rows , dtype = self . dtype , usecols = self . use_cols , converters = self . converters , encoding = self . encoding , ) # \u5982\u679c\u8fd4\u56de\u4ec5\u4e00\u6761\u8bb0\u5f55\uff0c\u6709\u65f6\u4f1a\u51fa\u73b0 shape == () if arr . shape == tuple (): arr = arr . reshape (( - 1 ,)) if self . sort_values is not None and arr . size > 1 : return np . sort ( arr , order = self . sort_values ) else : return arr __init__ ( self , dtype = 'float' , sort_values = None , use_cols = None , parse_date = '_time' , sep = ',' , encoding = 'utf-8' , skip_rows = 1 , header_line = 1 , comments = '#' , converters = None ) special \u00b6 construct a deserializer, which will convert a csv like multiline string/bytes array to a numpy array the data to be deserialized will be first split into array of fields, then use use_cols to select which fields to use, and re-order them by the order of use_cols. After that, the fields will be converted to numpy array and converted into dtype. by default dtype is float, which means the data will be converted to float. If you need to convert to a numpy structured array, then you can specify the dtype as a list of tuples, e.g. 1 dtype = [('col_1', 'datetime64[s]'), ('col_2', ' 1 : assert all ( [ isinstance ( x , int ) for x in self . converters . keys ()] ), \"converters must be a dict of column index to converter function, if there's no header\" self . 
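A hedged usage sketch for `NumpyDeserializer` as documented above; the field names and dtype are assumptions about the annotated CSV actually returned, and `csv_bytes` stands for a payload obtained from `InfluxClient.query()`.

```python
from omicron.dal.influx.serialize import NumpyDeserializer

# select and re-order columns by name; _time is parsed via the built-in parse_date handling
des = NumpyDeserializer(
    dtype=[("frame", "datetime64[s]"), ("code", "O"), ("close", "f4")],
    use_cols=["_time", "code", "close"],
    parse_date="_time",
    sort_values="frame",
)
bars = des(csv_bytes)  # csv_bytes: an annotated-CSV bytes payload from InfluxClient.query()
```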
_parsed_headers = None","title":"Serialize"},{"location":"api/dal/serialize/#serializer-and-deserializer","text":"","title":"Serializer and Deserializer"},{"location":"api/dal/serialize/#dataframedeserializer","text":"Source code in omicron/dal/influx/serialize.py class DataframeDeserializer ( Serializer ): def __init__ ( self , sort_values : Union [ str , List [ str ]] = None , encoding : str = \"utf-8\" , names : List [ str ] = None , usecols : Union [ List [ int ], List [ str ]] = None , dtype : dict = None , time_col : Union [ int , str ] = None , sep : str = \",\" , header : Union [ int , List [ int ], str ] = \"infer\" , engine : str = None , infer_datetime_format = True , lineterminator : str = None , converters : dict = None , skipfooter = 0 , index_col : Union [ int , str , List [ int ], List [ str ], bool ] = None , skiprows : Union [ int , List [ int ], Callable ] = None , ** kwargs , ): \"\"\"constructor a deserializer which convert a csv-like bytes array to pandas.DataFrame the args are the same as pandas.read_csv. for details, please refer to the official doc: [pandas.read_csv](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.read_csv.html) for performance consideration, please specify the following args: - engine = 'c' or 'pyarrow' when possible. Be noticed that 'pyarrow' is the fastest (multi-threaded supported) but may be error-prone. Only use it when you have thoroughly tested. - specify dtype when possible use `usecols` to specify the columns to read, and `names` to specify the column names (i.e., rename the columns), otherwise, the column names will be inferred from the first line. when `names` is specified, it has to be as same length as actual columns of the data. If this causes column renaming, then you should always use column name specified in `names` to access the data (instead of which in `usecols`). Examples: >>> data = \",result,table,_time,code,name\\\\r\\\\n,_result,0,2019-01-01T09:31:00Z,000002.XSHE,\u56fd\u8054\u8bc1\u5238\" >>> des = DataframeDeserializer(names=[\"_\", \"result\", \"table\", \"frame\", \"code\", \"name\"], usecols=[\"frame\", \"code\", \"name\"]) >>> des(data) frame code name 0 2019-01-01T09:31:00Z 000002.XSHE \u56fd\u8054\u8bc1\u5238 Args: sort_values: sort the dataframe by the specified columns encoding: if the data is bytes, then encoding is required, due to pandas.read_csv only handle string array sep: the separator/delimiter of each fields header: the row number of the header, default is 'infer' names: the column names of the dataframe index_col: the column number or name of the index column usecols: the column name of the columns to use dtype: the dtype of the columns engine: the engine of the csv file, default is None converters: specify converter for columns. skiprows: the row number to skip skipfooter: the row number to skip at the end of the file time_col: the columns to parse as dates infer_datetime_format: whether to infer the datetime format lineterminator: the line terminator of the csv file, only valid when engine is 'c' kwargs: other arguments \"\"\" self . sort_values = sort_values self . encoding = encoding self . sep = sep self . header = header self . names = names self . index_col = index_col self . usecols = usecols self . dtype = dtype self . engine = engine self . converters = converters or {} self . skiprows = skiprows self . skipfooter = skipfooter self . infer_datetime_format = infer_datetime_format self . lineterminator = lineterminator self . kwargs = kwargs if names is not None : self . 
header = 0 if time_col is not None : self . converters [ time_col ] = lambda x : ciso8601 . parse_datetime_as_naive ( x ) def __call__ ( self , data : Union [ str , bytes ]) -> pd . DataFrame : if isinstance ( data , str ): # treat data as string stream = io . StringIO ( data ) else : stream = io . StringIO ( data . decode ( self . encoding )) df = pd . read_csv ( stream , sep = self . sep , header = self . header , names = self . names , index_col = self . index_col , usecols = self . usecols , dtype = self . dtype , engine = self . engine , converters = self . converters , skiprows = self . skiprows , skipfooter = self . skipfooter , infer_datetime_format = self . infer_datetime_format , lineterminator = self . lineterminator , ** self . kwargs , ) if self . usecols : df = df [ list ( self . usecols )] if self . sort_values is not None : return df . sort_values ( self . sort_values ) else : return df","title":"DataFrameDeserializer"},{"location":"api/dal/serialize/#omicron.dal.influx.serialize.DataframeDeserializer.__init__","text":"constructor a deserializer which convert a csv-like bytes array to pandas.DataFrame the args are the same as pandas.read_csv. for details, please refer to the official doc: pandas.read_csv for performance consideration, please specify the following args: - engine = 'c' or 'pyarrow' when possible. Be noticed that 'pyarrow' is the fastest (multi-threaded supported) but may be error-prone. Only use it when you have thoroughly tested. 1 - specify dtype when possible use usecols to specify the columns to read, and names to specify the column names (i.e., rename the columns), otherwise, the column names will be inferred from the first line. when names is specified, it has to be as same length as actual columns of the data. If this causes column renaming, then you should always use column name specified in names to access the data (instead of which in usecols ). Examples: >>> data = \",result,table,_time,code,name \\r\\n ,_result,0,2019-01-01T09:31:00Z,000002.XSHE,\u56fd\u8054\u8bc1\u5238\" >>> des = DataframeDeserializer ( names = [ \"_\" , \"result\" , \"table\" , \"frame\" , \"code\" , \"name\" ], usecols = [ \"frame\" , \"code\" , \"name\" ]) >>> des ( data ) frame code name 0 2019 - 01 - 01 T09 : 31 : 00 Z 000002. XSHE \u56fd\u8054\u8bc1\u5238 Parameters: Name Type Description Default sort_values Union[str, List[str]] sort the dataframe by the specified columns None encoding str if the data is bytes, then encoding is required, due to pandas.read_csv only handle string array 'utf-8' sep str the separator/delimiter of each fields ',' header Union[int, List[int], str] the row number of the header, default is 'infer' 'infer' names List[str] the column names of the dataframe None index_col Union[int, str, List[int], List[str], bool] the column number or name of the index column None usecols Union[List[int], List[str]] the column name of the columns to use None dtype dict the dtype of the columns None engine str the engine of the csv file, default is None None converters dict specify converter for columns. 
None skiprows Union[int, List[int], Callable] the row number to skip None skipfooter the row number to skip at the end of the file 0 time_col Union[int, str] the columns to parse as dates None infer_datetime_format whether to infer the datetime format True lineterminator str the line terminator of the csv file, only valid when engine is 'c' None kwargs other arguments {} Source code in omicron/dal/influx/serialize.py def __init__ ( self , sort_values : Union [ str , List [ str ]] = None , encoding : str = \"utf-8\" , names : List [ str ] = None , usecols : Union [ List [ int ], List [ str ]] = None , dtype : dict = None , time_col : Union [ int , str ] = None , sep : str = \",\" , header : Union [ int , List [ int ], str ] = \"infer\" , engine : str = None , infer_datetime_format = True , lineterminator : str = None , converters : dict = None , skipfooter = 0 , index_col : Union [ int , str , List [ int ], List [ str ], bool ] = None , skiprows : Union [ int , List [ int ], Callable ] = None , ** kwargs , ): \"\"\"constructor a deserializer which convert a csv-like bytes array to pandas.DataFrame the args are the same as pandas.read_csv. for details, please refer to the official doc: [pandas.read_csv](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.read_csv.html) for performance consideration, please specify the following args: - engine = 'c' or 'pyarrow' when possible. Be noticed that 'pyarrow' is the fastest (multi-threaded supported) but may be error-prone. Only use it when you have thoroughly tested. - specify dtype when possible use `usecols` to specify the columns to read, and `names` to specify the column names (i.e., rename the columns), otherwise, the column names will be inferred from the first line. when `names` is specified, it has to be as same length as actual columns of the data. If this causes column renaming, then you should always use column name specified in `names` to access the data (instead of which in `usecols`). Examples: >>> data = \",result,table,_time,code,name\\\\r\\\\n,_result,0,2019-01-01T09:31:00Z,000002.XSHE,\u56fd\u8054\u8bc1\u5238\" >>> des = DataframeDeserializer(names=[\"_\", \"result\", \"table\", \"frame\", \"code\", \"name\"], usecols=[\"frame\", \"code\", \"name\"]) >>> des(data) frame code name 0 2019-01-01T09:31:00Z 000002.XSHE \u56fd\u8054\u8bc1\u5238 Args: sort_values: sort the dataframe by the specified columns encoding: if the data is bytes, then encoding is required, due to pandas.read_csv only handle string array sep: the separator/delimiter of each fields header: the row number of the header, default is 'infer' names: the column names of the dataframe index_col: the column number or name of the index column usecols: the column name of the columns to use dtype: the dtype of the columns engine: the engine of the csv file, default is None converters: specify converter for columns. skiprows: the row number to skip skipfooter: the row number to skip at the end of the file time_col: the columns to parse as dates infer_datetime_format: whether to infer the datetime format lineterminator: the line terminator of the csv file, only valid when engine is 'c' kwargs: other arguments \"\"\" self . sort_values = sort_values self . encoding = encoding self . sep = sep self . header = header self . names = names self . index_col = index_col self . usecols = usecols self . dtype = dtype self . engine = engine self . converters = converters or {} self . skiprows = skiprows self . skipfooter = skipfooter self . 
infer_datetime_format = infer_datetime_format self . lineterminator = lineterminator self . kwargs = kwargs if names is not None : self . header = 0 if time_col is not None : self . converters [ time_col ] = lambda x : ciso8601 . parse_datetime_as_naive ( x )","title":"__init__()"},{"location":"api/dal/serialize/#numpydeserializer","text":"Source code in omicron/dal/influx/serialize.py class NumpyDeserializer ( Serializer ): def __init__ ( self , dtype : List [ tuple ] = \"float\" , sort_values : Union [ str , List [ str ]] = None , use_cols : Union [ List [ str ], List [ int ]] = None , parse_date : Union [ int , str ] = \"_time\" , sep : str = \",\" , encoding : str = \"utf-8\" , skip_rows : Union [ int , List [ int ]] = 1 , header_line : int = 1 , comments : str = \"#\" , converters : Mapping [ int , Callable ] = None , ): \"\"\"construct a deserializer, which will convert a csv like multiline string/bytes array to a numpy array the data to be deserialized will be first split into array of fields, then use use_cols to select which fields to use, and re-order them by the order of use_cols. After that, the fields will be converted to numpy array and converted into dtype. by default dtype is float, which means the data will be converted to float. If you need to convert to a numpy structured array, then you can specify the dtype as a list of tuples, e.g. ``` dtype = [('col_1', 'datetime64[s]'), ('col_2', ' 1 : assert all ( [ isinstance ( x , int ) for x in self . converters . keys ()] ), \"converters must be a dict of column index to converter function, if there's no header\" self . _parsed_headers = None def _parse_header_once ( self , stream ): \"\"\"parse header and convert use_cols, if columns is specified in string. And if parse_date is required, add it into converters Args: stream : [description] Raises: SerializationError: [description] \"\"\" if self . header_line is None or self . _parsed_headers is not None : return try : line = stream . readlines ( self . header_line )[ - 1 ] cols = line . strip () . split ( self . sep ) self . _parsed_headers = cols use_cols = self . use_cols if use_cols is not None and isinstance ( use_cols [ 0 ], str ): self . use_cols = [ cols . index ( col ) for col in self . use_cols ] # convert keys of converters to int converters = { cols . index ( k ): v for k , v in self . converters . items ()} self . converters = converters if isinstance ( self . parse_date , str ): parse_date = cols . index ( self . parse_date ) if parse_date in self . converters . keys (): logger . debug ( \"specify duplicated converter in both parse_date and converters for col %s , use converters.\" , self . parse_date , ) else : # \u589e\u52a0parse_date\u5230converters self . converters [ parse_date ] = lambda x : ciso8601 . parse_datetime_as_naive ( x ) stream . seek ( 0 ) except ( IndexError , ValueError ): if line . strip () == \"\" : content = \"\" . join ( stream . readlines ()) . strip () if len ( content ) > 0 : raise SerializationError ( f \"specified heder line { self . header_line } is empty\" ) else : raise EmptyResult () else : raise SerializationError ( f \"bad header[ { self . header_line } ]: { line } \" ) def __call__ ( self , data : bytes ) -> np . ndarray : if self . encoding and isinstance ( data , bytes ): stream = io . StringIO ( data . decode ( self . encoding )) else : stream = io . StringIO ( data ) try : self . _parse_header_once ( stream ) except EmptyResult : return np . empty (( 0 ,), dtype = self . dtype ) arr = np . loadtxt ( stream . 
readlines (), delimiter = self . sep , skiprows = self . skip_rows , dtype = self . dtype , usecols = self . use_cols , converters = self . converters , encoding = self . encoding , ) # \u5982\u679c\u8fd4\u56de\u4ec5\u4e00\u6761\u8bb0\u5f55\uff0c\u6709\u65f6\u4f1a\u51fa\u73b0 shape == () if arr . shape == tuple (): arr = arr . reshape (( - 1 ,)) if self . sort_values is not None and arr . size > 1 : return np . sort ( arr , order = self . sort_values ) else : return arr","title":"NumpyDeserializer"},{"location":"api/dal/serialize/#omicron.dal.influx.serialize.NumpyDeserializer.__init__","text":"construct a deserializer, which will convert a csv like multiline string/bytes array to a numpy array the data to be deserialized will be first split into array of fields, then use use_cols to select which fields to use, and re-order them by the order of use_cols. After that, the fields will be converted to numpy array and converted into dtype. by default dtype is float, which means the data will be converted to float. If you need to convert to a numpy structured array, then you can specify the dtype as a list of tuples, e.g. 1 dtype = [('col_1', 'datetime64[s]'), ('col_2', ' 1 : assert all ( [ isinstance ( x , int ) for x in self . converters . keys ()] ), \"converters must be a dict of column index to converter function, if there's no header\" self . _parsed_headers = None","title":"__init__()"},{"location":"api/plotting/candlestick/","text":"\u7ed8\u5236K\u7ebf\u56fe\u3002 \u7528\u6cd5\u793a\u4f8b \u00b6 \u6ce8\u610f\u793a\u4f8b\u9700\u8981\u5728notebook\u4e2d\u8fd0\u884c\uff0c\u5426\u5219\u65e0\u6cd5\u751f\u6210\u56fe\u3002 1 2 3 4 5 from omicron.plotting.candlestick import Candlestick bars = await Stock . get_bars ( \"000001.XSHE\" , 120 , FrameType . DAY ) cs = Candlestick ( bars ) cs . plot () \u8fd9\u5c06\u751f\u6210\u4e0b\u56fe\uff1a \u9ed8\u8ba4\u5730\uff0c\u5c06\u663e\u793a\u6210\u4ea4\u91cf\u548cRSI\u6307\u6807\u4e24\u4e2a\u526f\u56fe\u3002\u53ef\u4ee5\u901a\u8fc7\u4ee5\u4e0b\u65b9\u5f0f\u6765\u5b9a\u5236\uff1a 1 2 3 4 5 cs = Candlestick ( bars , show_volume = True , show_rsi = True , show_peaks = False } cs . plot () \u589e\u52a0\u6807\u8bb0 \u00b6 1 2 3 4 5 6 7 8 9 10 11 from omicron.plotting.candlestick import Candlestick bars = await Stock . get_bars ( \"000001.XSHE\" , 120 , FrameType . DAY ) cs = Candlestick ( bars , show_volume = True , show_rsi = False , show_peaks = True ) cs . add_marks ([ 20 , 50 ]) cs . plot () \u8fd9\u5c06\u5728k\u7ebf\u4e0a\u663e\u793a\u4e24\u4e2a\u52a0\u53f7\uff1a \u663e\u793a\u5e03\u6797\u5e26 \u00b6 1 2 3 4 5 6 7 8 9 10 11 from omicron.plotting.candlestick import Candlestick bars = await Stock . get_bars ( \"000001.XSHE\" , 120 , FrameType . DAY ) cs = Candlestick ( bars , show_volume = True , show_rsi = False , show_peaks = True ) cs . add_indicator ( \"bbands\" , 20 ) cs . plot () \u663e\u793a\u5e73\u53f0 \u00b6 1 2 3 4 5 6 7 8 9 10 11 12 from omicron.plotting.candlestick import Candlestick bars = await Stock . get_bars ( \"000001.XSHE\" , 120 , FrameType . DAY ) cs = Candlestick ( bars , show_volume = True , show_rsi = False , show_peaks = True ) cs . mark_bbox () cs . plot () Candlestick \u00b6 Source code in omicron/plotting/candlestick.py class Candlestick : RED = \"#FF4136\" GREEN = \"#3DAA70\" TRANSPARENT = \"rgba(0,0,0,0)\" LIGHT_GRAY = \"rgba(0, 0, 0, 0.1)\" MA_COLORS = { 5 : \"#1432F5\" , 10 : \"#EB52F7\" , 20 : \"#C0C0C0\" , 30 : \"#882111\" , 60 : \"#5E8E28\" , 120 : \"#4294F7\" , 250 : \"#F09937\" , } def __init__ ( self , bars : np . 
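A minimal usage sketch for the DataframeDeserializer documented above, reusing the payload and column layout from its docstring example; the engine="c" argument is only the performance hint mentioned in the docs and can be dropped.

from omicron.dal.influx.serialize import DataframeDeserializer

# CSV-like payload in the same shape as the docstring example above.
data = ",result,table,_time,code,name\r\n,_result,0,2019-01-01T09:31:00Z,000002.XSHE,国联证券"

# Rename every column via `names`, then keep only the three we need via `usecols`.
des = DataframeDeserializer(
    names=["_", "result", "table", "frame", "code", "name"],
    usecols=["frame", "code", "name"],
    engine="c",   # optional performance hint from the docs
)
df = des(data)    # -> DataFrame with columns frame, code, name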
ndarray , ma_groups : List [ int ] = None , title : str = None , show_volume = True , show_rsi = True , show_peaks = False , width = None , height = None , ** kwargs , ): \"\"\"\u6784\u9020\u51fd\u6570 Args: bars: \u884c\u60c5\u6570\u636e ma_groups: \u5747\u7ebf\u7ec4\u53c2\u6570\u3002\u6bd4\u5982[5, 10, 20]\u8868\u660e\u5411k\u7ebf\u56fe\u4e2d\u6dfb\u52a05, 10, 20\u65e5\u5747\u7ebf\u3002\u5982\u679c\u4e0d\u63d0\u4f9b\uff0c\u5c06\u4ece\u6570\u7ec4[5, 10, 20, 30, 60, 120, 250]\u4e2d\u53d6\u76f4\u5230\u4e0e`len(bars) - 5`\u5339\u914d\u7684\u53c2\u6570\u4e3a\u6b62\u3002\u6bd4\u5982bars\u957f\u5ea6\u4e3a30\uff0c\u5219\u5c06\u53d6[5, 10, 20]\u6765\u7ed8\u5236\u5747\u7ebf\u3002 title: k\u7ebf\u56fe\u7684\u6807\u9898 show_volume: \u662f\u5426\u663e\u793a\u6210\u4ea4\u91cf\u56fe show_rsi: \u662f\u5426\u663e\u793aRSI\u56fe\u3002\u7f3a\u7701\u663e\u793a\u53c2\u6570\u4e3a6\u7684RSI\u56fe\u3002 show_peaks: \u662f\u5426\u6807\u8bb0\u68c0\u6d4b\u51fa\u6765\u7684\u5cf0\u8ddf\u8c37\u3002 width: the width in 'px' units of the figure height: the height in 'px' units of the figure Keyword Args: rsi_win int: default is 6 \"\"\" self . title = title self . bars = bars self . width = width self . height = height # traces for main area self . main_traces = {} # traces for indicator area self . ind_traces = {} self . ticks = self . _format_tick ( bars [ \"frame\" ]) self . _bar_close = array_math_round ( bars [ \"close\" ], 2 ) . astype ( np . float64 ) # for every candlestick, it must contain a candlestick plot cs = go . Candlestick ( x = self . ticks , open = bars [ \"open\" ], high = bars [ \"high\" ], low = bars [ \"low\" ], close = self . _bar_close , line = dict ({ \"width\" : 1 }), name = \"K\u7ebf\" , ** kwargs , ) # Set line and fill colors cs . increasing . fillcolor = \"rgba(255,255,255,0.9)\" cs . increasing . line . color = self . RED cs . decreasing . fillcolor = self . GREEN cs . decreasing . line . color = self . GREEN self . main_traces [ \"ohlc\" ] = cs if show_volume : self . add_indicator ( \"volume\" ) if show_peaks : self . add_main_trace ( \"peaks\" ) if show_rsi : self . add_indicator ( \"rsi\" , win = kwargs . get ( \"rsi_win\" , 6 )) # \u589e\u52a0\u5747\u7ebf if ma_groups is None : nbars = len ( bars ) if nbars < 9 : ma_groups = [] else : groups = np . array ([ 5 , 10 , 20 , 30 , 60 , 120 , 250 ]) idx = max ( np . argwhere ( groups < ( nbars - 5 ))) . item () + 1 ma_groups = groups [: idx ] for win in ma_groups : name = f \"ma { win } \" if win > len ( bars ): continue ma = moving_average ( self . _bar_close , win ) line = go . Scatter ( y = ma , x = self . ticks , name = name , line = dict ( width = 1 , color = self . MA_COLORS . get ( win )), ) self . main_traces [ name ] = line @property def figure ( self ): \"\"\"\u8fd4\u56de\u4e00\u4e2afigure\u5bf9\u8c61\"\"\" rows = len ( self . ind_traces ) + 1 specs = [[{ \"secondary_y\" : False }]] * rows specs [ 0 ][ 0 ][ \"secondary_y\" ] = True row_heights = [ 0.7 , * ([ 0.3 / ( rows - 1 )] * ( rows - 1 ))] print ( row_heights ) cols = 1 fig = make_subplots ( rows = rows , cols = cols , shared_xaxes = True , vertical_spacing = 0.1 , subplot_titles = ( self . title , * self . ind_traces . keys ()), row_heights = row_heights , specs = specs , ) for _ , trace in self . main_traces . items (): fig . add_trace ( trace , row = 1 , col = 1 ) for i , ( _ , trace ) in enumerate ( self . ind_traces . items ()): fig . add_trace ( trace , row = i + 2 , col = 1 ) ymin = np . min ( self . bars [ \"low\" ]) ymax = np . max ( self . 
bars [ \"high\" ]) ylim = [ ymin * 0.95 , ymax * 1.05 ] # \u663e\u793a\u5341\u5b57\u5149\u6807 fig . update_xaxes ( showgrid = False , showspikes = True , spikemode = \"across\" , spikesnap = \"cursor\" , spikecolor = \"grey\" , spikedash = \"solid\" , spikethickness = 1 , ) fig . update_yaxes ( showspikes = True , spikemode = \"across\" , spikesnap = \"cursor\" , spikedash = \"solid\" , spikecolor = \"grey\" , spikethickness = 1 , showgrid = True , gridcolor = self . LIGHT_GRAY , ) fig . update_xaxes ( nticks = len ( self . bars ) // 10 , ticklen = 10 , ticks = \"outside\" , minor = dict ( nticks = 5 , ticklen = 5 , ticks = \"outside\" ), row = rows , col = 1 , ) # \u8bbe\u7f6eK\u7ebf\u663e\u793a\u533a\u57df if self . width : win_size = int ( self . width // 10 ) else : win_size = 120 fig . update_xaxes ( type = \"category\" , range = [ len ( self . bars ) - win_size , len ( self . bars ) - 1 ] ) fig . update_layout ( yaxis = dict ( range = ylim ), hovermode = \"x unified\" , plot_bgcolor = self . TRANSPARENT , xaxis_rangeslider_visible = False , ) if self . width : fig . update_layout ( width = self . width ) if self . height : fig . update_layout ( height = self . height ) return fig def _format_tick ( self , tm : np . array ) -> NDArray : if tm . item ( 0 ) . hour == 0 : # assume it's date return np . array ( [ f \" { x . item () . year : 02 } - { x . item () . month : 02 } - { x . item () . day : 02 } \" for x in tm ] ) else : return np . array ( [ f \" { x . item () . month : 02 } - { x . item () . day : 02 } { x . item () . hour : 02 } : { x . item () . minute : 02 } \" for x in tm ] ) def _remove_ma ( self ): traces = {} for name in self . main_traces : if not name . startswith ( \"ma\" ): traces [ name ] = self . main_traces [ name ] self . main_traces = traces def add_main_trace ( self , trace_name : str , ** kwargs ): \"\"\"add trace to main plot \u652f\u6301\u7684\u56fe\u4f8b\u7c7b\u522b\u6709peaks, bbox\uff08bounding-box), bt(\u56de\u6d4b), support_line, resist_line Args: trace_name : \u56fe\u4f8b\u540d\u79f0 **kwargs : \u5176\u4ed6\u53c2\u6570 \"\"\" if trace_name == \"peaks\" : self . mark_peaks_and_valleys ( kwargs . get ( \"up_thres\" , 0.03 ), kwargs . get ( \"down_thres\" , - 0.03 ) ) # \u6807\u6ce8\u77e9\u5f62\u6846 elif trace_name == \"bbox\" : self . add_bounding_box ( kwargs . get ( \"boxes\" )) # \u56de\u6d4b\u7ed3\u679c elif trace_name == \"bt\" : self . add_backtest_result ( kwargs . get ( \"bt\" )) # \u589e\u52a0\u76f4\u7ebf elif trace_name == \"support_line\" : self . add_line ( \"\u652f\u6491\u7ebf\" , kwargs . get ( \"x\" ), kwargs . get ( \"y\" )) elif trace_name == \"resist_line\" : self . add_line ( \"\u538b\u529b\u7ebf\" , kwargs . get ( \"x\" ), kwargs . get ( \"y\" )) def add_line ( self , trace_name : str , x : List [ int ], y : List [ float ]): \"\"\"\u5728k\u7ebf\u56fe\u4e0a\u589e\u52a0\u4ee5`x`,`y`\u8868\u793a\u7684\u4e00\u6761\u76f4\u7ebf Args: trace_name : \u56fe\u4f8b\u540d\u79f0 x : x\u8f74\u5750\u6807\uff0c\u6240\u6709\u7684x\u503c\u90fd\u5fc5\u987b\u5c5e\u4e8e[0, len(self.bars)] y : y\u503c \"\"\" line = go . Scatter ( x = self . ticks [ x ], y = y , mode = \"lines\" , name = trace_name ) self . 
main_traces [ trace_name ] = line def mark_support_resist_lines ( self , upthres : float = None , downthres : float = None , use_close = True , win = 60 ): \"\"\"\u5728K\u7ebf\u56fe\u4e0a\u6807\u6ce8\u652f\u6491\u7ebf\u548c\u538b\u529b\u7ebf \u5728`win`\u4e2ak\u7ebf\u5185\uff0c\u627e\u51fa\u6240\u6709\u7684\u5c40\u90e8\u5cf0\u8c37\u70b9\uff0c\u5e76\u4ee5\u6700\u9ad8\u7684\u4e24\u4e2a\u5cf0\u8fde\u7ebf\u751f\u6210\u538b\u529b\u7ebf\uff0c\u4ee5\u6700\u4f4e\u7684\u4e24\u4e2a\u8c37\u8fde\u7ebf\u751f\u6210\u652f\u6491\u7ebf\u3002 Args: upthres : \u7528\u6765\u68c0\u6d4b\u5cf0\u8c37\u65f6\u4f7f\u7528\u7684\u9608\u503c\uff0c\u53c2\u89c1`omicron.talib.morph.peaks_and_valleys` downthres : \u7528\u6765\u68c0\u6d4b\u5cf0\u8c37\u65f6\u4f7f\u7528\u7684\u9608\u503c\uff0c\u53c2\u89c1`omicron.talib.morph.peaks_and_valleys`. use_close : \u662f\u5426\u4f7f\u7528\u6536\u76d8\u4ef7\u6765\u8fdb\u884c\u68c0\u6d4b\u3002\u5982\u679c\u4e3aFalse\uff0c\u5219\u4f7f\u7528high\u6765\u68c0\u6d4b\u538b\u529b\u7ebf\uff0c\u4f7f\u7528low\u6765\u68c0\u6d4b\u652f\u6491\u7ebf. win : \u68c0\u6d4b\u5c40\u90e8\u9ad8\u4f4e\u70b9\u7684\u7a97\u53e3. \"\"\" bars = self . bars [ - win :] clipped = len ( self . bars ) - win if use_close : support , resist , x_start = support_resist_lines ( self . _bar_close , upthres , downthres ) x = np . arange ( len ( bars ))[ x_start :] self . add_main_trace ( \"support_line\" , x = x + clipped , y = support ( x )) self . add_main_trace ( \"resist_line\" , x = x + clipped , y = resist ( x )) else : # \u4f7f\u7528\"high\"\u548c\"low\" bars = self . bars [ - win :] support , _ , x_start = support_resist_lines ( bars [ \"low\" ], upthres , downthres ) x = np . arange ( len ( bars ))[ x_start :] self . add_main_trace ( \"support_line\" , x = x + clipped , y = support ( x )) _ , resist , x_start = support_resist_lines ( bars [ \"high\" ], upthres , downthres ) x = np . arange ( len ( bars ))[ x_start :] self . add_main_trace ( \"resist_line\" , x = x + clipped , y = resist ( x )) def mark_bbox ( self , min_size : int = 20 ): \"\"\"\u5728k\u7ebf\u56fe\u4e0a\u68c0\u6d4b\u5e76\u6807\u6ce8\u77e9\u5f62\u6846 Args: min_size : \u77e9\u5f62\u6846\u7684\u6700\u5c0f\u957f\u5ea6 \"\"\" boxes = plateaus ( self . _bar_close , min_size ) self . add_main_trace ( \"bbox\" , boxes = boxes ) def mark_backtest_result ( self , result : dict ): \"\"\"\u6807\u8bb0\u4e70\u5356\u70b9\u548c\u56de\u6d4b\u6570\u636e TODO: \u6b64\u65b9\u6cd5\u53ef\u80fd\u672a\u4e0ebacktest\u8fd4\u56de\u503c\u540c\u6b65\u3002\u6b64\u5916\uff0c\u5728portofolio\u56de\u6d4b\u4e2d\uff0c\u4e0d\u53ef\u80fd\u5728k\u7ebf\u56fe\u4e2d\u4f7f\u7528\u6b64\u65b9\u6cd5\u3002 Args: points : \u4e70\u5356\u70b9\u7684\u5750\u6807\u3002 \"\"\" trades = result . get ( \"trades\" ) assets = result . get ( \"assets\" ) x , y , labels = [], [], [] hover = [] labels_color = defaultdict ( list ) for trade in trades : trade_date = arrow . get ( trade [ \"time\" ]) . date () asset = assets . get ( trade_date ) security = trade [ \"security\" ] price = trade [ \"price\" ] volume = trade [ \"volume\" ] side = trade [ \"order_side\" ] x . append ( self . _format_tick ( trade_date )) bar = self . bars [ self . bars [ \"frame\" ] == trade_date ] if side == \"\u4e70\u5165\" : hover . append ( f \"\u603b\u8d44\u4ea7: { asset }

{ side } : { security }
\u4e70\u5165\u4ef7: { price }
\u80a1\u6570: { volume } \" ) y . append ( bar [ \"high\" ][ 0 ] * 1.1 ) labels . append ( \"B\" ) labels_color [ \"color\" ] . append ( self . RED ) else : y . append ( bar [ \"low\" ][ 0 ] * 0.99 ) hover . append ( f \"\u603b\u8d44\u4ea7: { asset }

{ side } : { security }
\u5356\u51fa\u4ef7: { price }
\u80a1\u6570: { volume } \" ) labels . append ( \"S\" ) labels_color [ \"color\" ] . append ( self . GREEN ) # txt.append(f'{side}:{security}
\u5356\u51fa\u4ef7:{price}
\u80a1\u6570:{volume}') trace = go . Scatter ( x = x , y = y , mode = \"text\" , text = labels , name = \"backtest\" , hovertext = hover , textfont = labels_color , ) self . main_traces [ \"bs\" ] = trace def mark_peaks_and_valleys ( self , up_thres : Optional [ float ] = None , down_thres : Optional [ float ] = None ): \"\"\"\u5728K\u7ebf\u56fe\u4e0a\u6807\u6ce8\u5cf0\u8c37\u70b9 Args: up_thres : \u7528\u6765\u68c0\u6d4b\u5cf0\u8c37\u65f6\u4f7f\u7528\u7684\u9608\u503c\uff0c\u53c2\u89c1[omicron.talib.morph.peaks_and_valleys][] down_thres : \u7528\u6765\u68c0\u6d4b\u5cf0\u8c37\u65f6\u4f7f\u7528\u7684\u9608\u503c\uff0c\u53c2\u89c1[omicron.talib.morph.peaks_and_valleys][] \"\"\" bars = self . bars flags = peaks_and_valleys ( self . _bar_close , up_thres , down_thres ) # \u79fb\u9664\u9996\u5c3e\u7684\u9876\u5e95\u6807\u8bb0\uff0c\u4e00\u822c\u60c5\u51b5\u4e0b\u5b83\u4eec\u90fd\u4e0d\u662f\u771f\u6b63\u7684\u9876\u548c\u5e95\u3002 flags [ 0 ] = 0 flags [ - 1 ] = 0 marker_margin = ( max ( bars [ \"high\" ]) - min ( bars [ \"low\" ])) * 0.05 ticks_up = self . ticks [ flags == 1 ] y_up = bars [ \"high\" ][ flags == 1 ] + marker_margin ticks_down = self . ticks [ flags == - 1 ] y_down = bars [ \"low\" ][ flags == - 1 ] - marker_margin trace = go . Scatter ( mode = \"markers\" , x = ticks_up , y = y_up , marker_symbol = \"triangle-down\" , name = \"\u5cf0\" ) self . main_traces [ \"peaks\" ] = trace trace = go . Scatter ( mode = \"markers\" , x = ticks_down , y = y_down , marker_symbol = \"triangle-up\" , name = \"\u8c37\" , ) self . main_traces [ \"valleys\" ] = trace def add_bounding_box ( self , boxes : List [ Tuple ]): \"\"\"bbox\u662f\u6807\u8bb0\u5728k\u7ebf\u56fe\u4e0a\u67d0\u4e2a\u533a\u95f4\u5185\u7684\u77e9\u5f62\u6846\uff0c\u5b83\u4ee5\u8be5\u533a\u95f4\u6700\u9ad8\u4ef7\u548c\u6700\u4f4e\u4ef7\u4e3a\u4e0a\u4e0b\u8fb9\u3002 Args: boxes: \u6bcf\u4e2a\u5143\u7d20(start, width)\u8868\u793a\u5404\u4e2abbox\u7684\u8d77\u70b9\u548c\u5bbd\u5ea6\u3002 \"\"\" for j , box in enumerate ( boxes ): x , y = [], [] i , width = box if len ( x ): x . append ( None ) y . append ( None ) group = self . bars [ i : i + width ] mean = np . mean ( group [ \"close\" ]) std = 2 * np . std ( group [ \"close\" ]) # \u843d\u5728\u4e24\u4e2a\u6807\u51c6\u5dee\u4ee5\u5185\u7684\u5b9e\u4f53\u6700\u4e0a\u65b9\u548c\u6700\u4e0b\u65b9\u503c hc = np . max ( group [ group [ \"close\" ] < mean + std ][ \"close\" ]) lc = np . min ( group [ group [ \"close\" ] > mean - std ][ \"close\" ]) ho = np . max ( group [ group [ \"open\" ] < mean + std ][ \"open\" ]) lo = np . min ( group [ group [ \"open\" ] > mean - std ][ \"open\" ]) h = max ( hc , ho ) low = min ( lo , lc ) x . extend ( self . ticks [[ i , i + width - 1 , i + width - 1 , i , i ]]) y . extend (( h , h , low , low , h )) hover = f \"\u5bbd\u5ea6: { width }
\u632f\u5e45: { h / low - 1 : .2% } \" trace = go . Scatter ( x = x , y = y , fill = \"toself\" , name = f \"\u5e73\u53f0\u6574\u7406 { j } \" , text = hover ) self . main_traces [ f \"bbox- { j } \" ] = trace def add_indicator ( self , indicator : str , ** kwargs ): \"\"\"\u5411k\u7ebf\u56fe\u4e2d\u589e\u52a0\u6280\u672f\u6307\u6807 Args: indicator: \u5f53\u524d\u652f\u6301\u503c\u6709'volume', 'rsi', 'bbands' kwargs: \u8ba1\u7b97\u67d0\u4e2aindicator\u65f6\uff0c\u9700\u8981\u7684\u53c2\u6570\u3002\u6bd4\u5982\u8ba1\u7b97bbands\u65f6\uff0c\u9700\u8981\u4f20\u5165\u5747\u7ebf\u7684window \"\"\" if indicator == \"volume\" : colors = np . repeat ( self . RED , len ( self . bars )) colors [ self . bars [ \"close\" ] <= self . bars [ \"open\" ]] = self . GREEN trace = go . Bar ( x = self . ticks , y = self . bars [ \"volume\" ], showlegend = False , marker = { \"color\" : colors }, ) elif indicator == \"rsi\" : win = kwargs . get ( \"win\" ) rsi = talib . RSI ( self . _bar_close , win ) # type: ignore trace = go . Scatter ( x = self . ticks , y = rsi , showlegend = False ) elif indicator == \"bbands\" : self . _remove_ma () win = kwargs . get ( \"win\" ) for name , ind in zip ( [ \"bbands-high\" , \"bbands-mean\" , \"bbands-low\" ], talib . BBANDS ( self . _bar_close , win ), # type: ignore ): trace = go . Scatter ( x = self . ticks , y = ind , showlegend = True , name = name ) self . main_traces [ name ] = trace return else : raise ValueError ( f \" { indicator } not supported\" ) self . ind_traces [ indicator ] = trace def add_marks ( self , x : List [ int ], y : List [ float ], name : str , marker : str = \"cross\" , color : Optional [ str ] = None , ): \"\"\"\u5411k\u7ebf\u56fe\u4e2d\u589e\u52a0\u6807\u8bb0\u70b9\"\"\" trace = go . Scatter ( x = self . ticks [ x ], y = y , mode = \"markers\" , marker_symbol = marker , marker_color = color , name = name , ) self . main_traces [ name ] = trace def plot ( self ): \"\"\"\u7ed8\u5236\u56fe\u8868\"\"\" fig = self . figure fig . show () figure property readonly \u00b6 \u8fd4\u56de\u4e00\u4e2afigure\u5bf9\u8c61 __init__ ( self , bars , ma_groups = None , title = None , show_volume = True , show_rsi = True , show_peaks = False , width = None , height = None , ** kwargs ) special \u00b6 \u6784\u9020\u51fd\u6570 Parameters: Name Type Description Default bars ndarray \u884c\u60c5\u6570\u636e required ma_groups List[int] \u5747\u7ebf\u7ec4\u53c2\u6570\u3002\u6bd4\u5982[5, 10, 20]\u8868\u660e\u5411k\u7ebf\u56fe\u4e2d\u6dfb\u52a05, 10, 20\u65e5\u5747\u7ebf\u3002\u5982\u679c\u4e0d\u63d0\u4f9b\uff0c\u5c06\u4ece\u6570\u7ec4[5, 10, 20, 30, 60, 120, 250]\u4e2d\u53d6\u76f4\u5230\u4e0e len(bars) - 5 \u5339\u914d\u7684\u53c2\u6570\u4e3a\u6b62\u3002\u6bd4\u5982bars\u957f\u5ea6\u4e3a30\uff0c\u5219\u5c06\u53d6[5, 10, 20]\u6765\u7ed8\u5236\u5747\u7ebf\u3002 None title str k\u7ebf\u56fe\u7684\u6807\u9898 None show_volume \u662f\u5426\u663e\u793a\u6210\u4ea4\u91cf\u56fe True show_rsi \u662f\u5426\u663e\u793aRSI\u56fe\u3002\u7f3a\u7701\u663e\u793a\u53c2\u6570\u4e3a6\u7684RSI\u56fe\u3002 True show_peaks \u662f\u5426\u6807\u8bb0\u68c0\u6d4b\u51fa\u6765\u7684\u5cf0\u8ddf\u8c37\u3002 False width the width in 'px' units of the figure None height the height in 'px' units of the figure None Keyword arguments: Name Type Description rsi_win int default is 6 Source code in omicron/plotting/candlestick.py def __init__ ( self , bars : np . 
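The quick-start earlier on this page calls cs.add_marks([20, 50]), but the signature documented here is add_marks(self, x, y, name, marker="cross", color=None), which also expects y values and a trace name. A sketch of a full call, under the same notebook assumptions as the page's other examples (omicron initialized, Stock and FrameType available):

from omicron.plotting.candlestick import Candlestick

bars = await Stock.get_bars("000001.XSHE", 120, FrameType.DAY)
cs = Candlestick(bars, show_volume=True, show_rsi=False, show_peaks=True)

x = [20, 50]                        # bar offsets to mark
y = bars["high"][x] * 1.02          # place the markers just above the highs
cs.add_marks(x, y, name="signals")  # marker defaults to "cross"
cs.plot()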
ndarray , ma_groups : List [ int ] = None , title : str = None , show_volume = True , show_rsi = True , show_peaks = False , width = None , height = None , ** kwargs , ): \"\"\"\u6784\u9020\u51fd\u6570 Args: bars: \u884c\u60c5\u6570\u636e ma_groups: \u5747\u7ebf\u7ec4\u53c2\u6570\u3002\u6bd4\u5982[5, 10, 20]\u8868\u660e\u5411k\u7ebf\u56fe\u4e2d\u6dfb\u52a05, 10, 20\u65e5\u5747\u7ebf\u3002\u5982\u679c\u4e0d\u63d0\u4f9b\uff0c\u5c06\u4ece\u6570\u7ec4[5, 10, 20, 30, 60, 120, 250]\u4e2d\u53d6\u76f4\u5230\u4e0e`len(bars) - 5`\u5339\u914d\u7684\u53c2\u6570\u4e3a\u6b62\u3002\u6bd4\u5982bars\u957f\u5ea6\u4e3a30\uff0c\u5219\u5c06\u53d6[5, 10, 20]\u6765\u7ed8\u5236\u5747\u7ebf\u3002 title: k\u7ebf\u56fe\u7684\u6807\u9898 show_volume: \u662f\u5426\u663e\u793a\u6210\u4ea4\u91cf\u56fe show_rsi: \u662f\u5426\u663e\u793aRSI\u56fe\u3002\u7f3a\u7701\u663e\u793a\u53c2\u6570\u4e3a6\u7684RSI\u56fe\u3002 show_peaks: \u662f\u5426\u6807\u8bb0\u68c0\u6d4b\u51fa\u6765\u7684\u5cf0\u8ddf\u8c37\u3002 width: the width in 'px' units of the figure height: the height in 'px' units of the figure Keyword Args: rsi_win int: default is 6 \"\"\" self . title = title self . bars = bars self . width = width self . height = height # traces for main area self . main_traces = {} # traces for indicator area self . ind_traces = {} self . ticks = self . _format_tick ( bars [ \"frame\" ]) self . _bar_close = array_math_round ( bars [ \"close\" ], 2 ) . astype ( np . float64 ) # for every candlestick, it must contain a candlestick plot cs = go . Candlestick ( x = self . ticks , open = bars [ \"open\" ], high = bars [ \"high\" ], low = bars [ \"low\" ], close = self . _bar_close , line = dict ({ \"width\" : 1 }), name = \"K\u7ebf\" , ** kwargs , ) # Set line and fill colors cs . increasing . fillcolor = \"rgba(255,255,255,0.9)\" cs . increasing . line . color = self . RED cs . decreasing . fillcolor = self . GREEN cs . decreasing . line . color = self . GREEN self . main_traces [ \"ohlc\" ] = cs if show_volume : self . add_indicator ( \"volume\" ) if show_peaks : self . add_main_trace ( \"peaks\" ) if show_rsi : self . add_indicator ( \"rsi\" , win = kwargs . get ( \"rsi_win\" , 6 )) # \u589e\u52a0\u5747\u7ebf if ma_groups is None : nbars = len ( bars ) if nbars < 9 : ma_groups = [] else : groups = np . array ([ 5 , 10 , 20 , 30 , 60 , 120 , 250 ]) idx = max ( np . argwhere ( groups < ( nbars - 5 ))) . item () + 1 ma_groups = groups [: idx ] for win in ma_groups : name = f \"ma { win } \" if win > len ( bars ): continue ma = moving_average ( self . _bar_close , win ) line = go . Scatter ( y = ma , x = self . ticks , name = name , line = dict ( width = 1 , color = self . MA_COLORS . get ( win )), ) self . 
main_traces [ name ] = line add_bounding_box ( self , boxes ) \u00b6 bbox\u662f\u6807\u8bb0\u5728k\u7ebf\u56fe\u4e0a\u67d0\u4e2a\u533a\u95f4\u5185\u7684\u77e9\u5f62\u6846\uff0c\u5b83\u4ee5\u8be5\u533a\u95f4\u6700\u9ad8\u4ef7\u548c\u6700\u4f4e\u4ef7\u4e3a\u4e0a\u4e0b\u8fb9\u3002 Parameters: Name Type Description Default boxes List[Tuple] \u6bcf\u4e2a\u5143\u7d20(start, width)\u8868\u793a\u5404\u4e2abbox\u7684\u8d77\u70b9\u548c\u5bbd\u5ea6\u3002 required Source code in omicron/plotting/candlestick.py def add_bounding_box ( self , boxes : List [ Tuple ]): \"\"\"bbox\u662f\u6807\u8bb0\u5728k\u7ebf\u56fe\u4e0a\u67d0\u4e2a\u533a\u95f4\u5185\u7684\u77e9\u5f62\u6846\uff0c\u5b83\u4ee5\u8be5\u533a\u95f4\u6700\u9ad8\u4ef7\u548c\u6700\u4f4e\u4ef7\u4e3a\u4e0a\u4e0b\u8fb9\u3002 Args: boxes: \u6bcf\u4e2a\u5143\u7d20(start, width)\u8868\u793a\u5404\u4e2abbox\u7684\u8d77\u70b9\u548c\u5bbd\u5ea6\u3002 \"\"\" for j , box in enumerate ( boxes ): x , y = [], [] i , width = box if len ( x ): x . append ( None ) y . append ( None ) group = self . bars [ i : i + width ] mean = np . mean ( group [ \"close\" ]) std = 2 * np . std ( group [ \"close\" ]) # \u843d\u5728\u4e24\u4e2a\u6807\u51c6\u5dee\u4ee5\u5185\u7684\u5b9e\u4f53\u6700\u4e0a\u65b9\u548c\u6700\u4e0b\u65b9\u503c hc = np . max ( group [ group [ \"close\" ] < mean + std ][ \"close\" ]) lc = np . min ( group [ group [ \"close\" ] > mean - std ][ \"close\" ]) ho = np . max ( group [ group [ \"open\" ] < mean + std ][ \"open\" ]) lo = np . min ( group [ group [ \"open\" ] > mean - std ][ \"open\" ]) h = max ( hc , ho ) low = min ( lo , lc ) x . extend ( self . ticks [[ i , i + width - 1 , i + width - 1 , i , i ]]) y . extend (( h , h , low , low , h )) hover = f \"\u5bbd\u5ea6: { width }
\u632f\u5e45: { h / low - 1 : .2% } \" trace = go . Scatter ( x = x , y = y , fill = \"toself\" , name = f \"\u5e73\u53f0\u6574\u7406 { j } \" , text = hover ) self . main_traces [ f \"bbox- { j } \" ] = trace add_indicator ( self , indicator , ** kwargs ) \u00b6 \u5411k\u7ebf\u56fe\u4e2d\u589e\u52a0\u6280\u672f\u6307\u6807 Parameters: Name Type Description Default indicator str \u5f53\u524d\u652f\u6301\u503c\u6709'volume', 'rsi', 'bbands' required kwargs \u8ba1\u7b97\u67d0\u4e2aindicator\u65f6\uff0c\u9700\u8981\u7684\u53c2\u6570\u3002\u6bd4\u5982\u8ba1\u7b97bbands\u65f6\uff0c\u9700\u8981\u4f20\u5165\u5747\u7ebf\u7684window {} Source code in omicron/plotting/candlestick.py def add_indicator ( self , indicator : str , ** kwargs ): \"\"\"\u5411k\u7ebf\u56fe\u4e2d\u589e\u52a0\u6280\u672f\u6307\u6807 Args: indicator: \u5f53\u524d\u652f\u6301\u503c\u6709'volume', 'rsi', 'bbands' kwargs: \u8ba1\u7b97\u67d0\u4e2aindicator\u65f6\uff0c\u9700\u8981\u7684\u53c2\u6570\u3002\u6bd4\u5982\u8ba1\u7b97bbands\u65f6\uff0c\u9700\u8981\u4f20\u5165\u5747\u7ebf\u7684window \"\"\" if indicator == \"volume\" : colors = np . repeat ( self . RED , len ( self . bars )) colors [ self . bars [ \"close\" ] <= self . bars [ \"open\" ]] = self . GREEN trace = go . Bar ( x = self . ticks , y = self . bars [ \"volume\" ], showlegend = False , marker = { \"color\" : colors }, ) elif indicator == \"rsi\" : win = kwargs . get ( \"win\" ) rsi = talib . RSI ( self . _bar_close , win ) # type: ignore trace = go . Scatter ( x = self . ticks , y = rsi , showlegend = False ) elif indicator == \"bbands\" : self . _remove_ma () win = kwargs . get ( \"win\" ) for name , ind in zip ( [ \"bbands-high\" , \"bbands-mean\" , \"bbands-low\" ], talib . BBANDS ( self . _bar_close , win ), # type: ignore ): trace = go . Scatter ( x = self . ticks , y = ind , showlegend = True , name = name ) self . main_traces [ name ] = trace return else : raise ValueError ( f \" { indicator } not supported\" ) self . ind_traces [ indicator ] = trace add_line ( self , trace_name , x , y ) \u00b6 \u5728k\u7ebf\u56fe\u4e0a\u589e\u52a0\u4ee5 x , y \u8868\u793a\u7684\u4e00\u6761\u76f4\u7ebf Parameters: Name Type Description Default trace_name \u56fe\u4f8b\u540d\u79f0 required x x\u8f74\u5750\u6807\uff0c\u6240\u6709\u7684x\u503c\u90fd\u5fc5\u987b\u5c5e\u4e8e[0, len(self.bars)] required y y\u503c required Source code in omicron/plotting/candlestick.py def add_line ( self , trace_name : str , x : List [ int ], y : List [ float ]): \"\"\"\u5728k\u7ebf\u56fe\u4e0a\u589e\u52a0\u4ee5`x`,`y`\u8868\u793a\u7684\u4e00\u6761\u76f4\u7ebf Args: trace_name : \u56fe\u4f8b\u540d\u79f0 x : x\u8f74\u5750\u6807\uff0c\u6240\u6709\u7684x\u503c\u90fd\u5fc5\u987b\u5c5e\u4e8e[0, len(self.bars)] y : y\u503c \"\"\" line = go . Scatter ( x = self . ticks [ x ], y = y , mode = \"lines\" , name = trace_name ) self . 
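Because add_indicator() reads the window from **kwargs (win = kwargs.get("win") in the source above), the Bollinger-band window has to be passed by keyword rather than positionally. A small sketch, same notebook assumptions as above:

from omicron.plotting.candlestick import Candlestick

bars = await Stock.get_bars("000001.XSHE", 120, FrameType.DAY)
cs = Candlestick(bars, show_volume=True, show_rsi=False)
cs.add_indicator("bbands", win=20)   # note the keyword; this also removes any MA traces
cs.plot()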
main_traces [ trace_name ] = line add_main_trace ( self , trace_name , ** kwargs ) \u00b6 add trace to main plot \u652f\u6301\u7684\u56fe\u4f8b\u7c7b\u522b\u6709peaks, bbox\uff08bounding-box), bt(\u56de\u6d4b), support_line, resist_line Parameters: Name Type Description Default trace_name \u56fe\u4f8b\u540d\u79f0 required **kwargs \u5176\u4ed6\u53c2\u6570 {} Source code in omicron/plotting/candlestick.py def add_main_trace ( self , trace_name : str , ** kwargs ): \"\"\"add trace to main plot \u652f\u6301\u7684\u56fe\u4f8b\u7c7b\u522b\u6709peaks, bbox\uff08bounding-box), bt(\u56de\u6d4b), support_line, resist_line Args: trace_name : \u56fe\u4f8b\u540d\u79f0 **kwargs : \u5176\u4ed6\u53c2\u6570 \"\"\" if trace_name == \"peaks\" : self . mark_peaks_and_valleys ( kwargs . get ( \"up_thres\" , 0.03 ), kwargs . get ( \"down_thres\" , - 0.03 ) ) # \u6807\u6ce8\u77e9\u5f62\u6846 elif trace_name == \"bbox\" : self . add_bounding_box ( kwargs . get ( \"boxes\" )) # \u56de\u6d4b\u7ed3\u679c elif trace_name == \"bt\" : self . add_backtest_result ( kwargs . get ( \"bt\" )) # \u589e\u52a0\u76f4\u7ebf elif trace_name == \"support_line\" : self . add_line ( \"\u652f\u6491\u7ebf\" , kwargs . get ( \"x\" ), kwargs . get ( \"y\" )) elif trace_name == \"resist_line\" : self . add_line ( \"\u538b\u529b\u7ebf\" , kwargs . get ( \"x\" ), kwargs . get ( \"y\" )) add_marks ( self , x , y , name , marker = 'cross' , color = None ) \u00b6 \u5411k\u7ebf\u56fe\u4e2d\u589e\u52a0\u6807\u8bb0\u70b9 Source code in omicron/plotting/candlestick.py def add_marks ( self , x : List [ int ], y : List [ float ], name : str , marker : str = \"cross\" , color : Optional [ str ] = None , ): \"\"\"\u5411k\u7ebf\u56fe\u4e2d\u589e\u52a0\u6807\u8bb0\u70b9\"\"\" trace = go . Scatter ( x = self . ticks [ x ], y = y , mode = \"markers\" , marker_symbol = marker , marker_color = color , name = name , ) self . main_traces [ name ] = trace mark_backtest_result ( self , result ) \u00b6 \u6807\u8bb0\u4e70\u5356\u70b9\u548c\u56de\u6d4b\u6570\u636e Todo \u6b64\u65b9\u6cd5\u53ef\u80fd\u672a\u4e0ebacktest\u8fd4\u56de\u503c\u540c\u6b65\u3002\u6b64\u5916\uff0c\u5728portofolio\u56de\u6d4b\u4e2d\uff0c\u4e0d\u53ef\u80fd\u5728k\u7ebf\u56fe\u4e2d\u4f7f\u7528\u6b64\u65b9\u6cd5\u3002 Parameters: Name Type Description Default points \u4e70\u5356\u70b9\u7684\u5750\u6807\u3002 required Source code in omicron/plotting/candlestick.py def mark_backtest_result ( self , result : dict ): \"\"\"\u6807\u8bb0\u4e70\u5356\u70b9\u548c\u56de\u6d4b\u6570\u636e TODO: \u6b64\u65b9\u6cd5\u53ef\u80fd\u672a\u4e0ebacktest\u8fd4\u56de\u503c\u540c\u6b65\u3002\u6b64\u5916\uff0c\u5728portofolio\u56de\u6d4b\u4e2d\uff0c\u4e0d\u53ef\u80fd\u5728k\u7ebf\u56fe\u4e2d\u4f7f\u7528\u6b64\u65b9\u6cd5\u3002 Args: points : \u4e70\u5356\u70b9\u7684\u5750\u6807\u3002 \"\"\" trades = result . get ( \"trades\" ) assets = result . get ( \"assets\" ) x , y , labels = [], [], [] hover = [] labels_color = defaultdict ( list ) for trade in trades : trade_date = arrow . get ( trade [ \"time\" ]) . date () asset = assets . get ( trade_date ) security = trade [ \"security\" ] price = trade [ \"price\" ] volume = trade [ \"volume\" ] side = trade [ \"order_side\" ] x . append ( self . _format_tick ( trade_date )) bar = self . bars [ self . bars [ \"frame\" ] == trade_date ] if side == \"\u4e70\u5165\" : hover . append ( f \"\u603b\u8d44\u4ea7: { asset }

{ side } : { security }
\u4e70\u5165\u4ef7: { price }
\u80a1\u6570: { volume } \" ) y . append ( bar [ \"high\" ][ 0 ] * 1.1 ) labels . append ( \"B\" ) labels_color [ \"color\" ] . append ( self . RED ) else : y . append ( bar [ \"low\" ][ 0 ] * 0.99 ) hover . append ( f \"\u603b\u8d44\u4ea7: { asset }

{ side } : { security }
\u5356\u51fa\u4ef7: { price }
\u80a1\u6570: { volume } \" ) labels . append ( \"S\" ) labels_color [ \"color\" ] . append ( self . GREEN ) # txt.append(f'{side}:{security}
\u5356\u51fa\u4ef7:{price}
\u80a1\u6570:{volume}') trace = go . Scatter ( x = x , y = y , mode = \"text\" , text = labels , name = \"backtest\" , hovertext = hover , textfont = labels_color , ) self . main_traces [ \"bs\" ] = trace mark_bbox ( self , min_size = 20 ) \u00b6 \u5728k\u7ebf\u56fe\u4e0a\u68c0\u6d4b\u5e76\u6807\u6ce8\u77e9\u5f62\u6846 Parameters: Name Type Description Default min_size \u77e9\u5f62\u6846\u7684\u6700\u5c0f\u957f\u5ea6 20 Source code in omicron/plotting/candlestick.py def mark_bbox ( self , min_size : int = 20 ): \"\"\"\u5728k\u7ebf\u56fe\u4e0a\u68c0\u6d4b\u5e76\u6807\u6ce8\u77e9\u5f62\u6846 Args: min_size : \u77e9\u5f62\u6846\u7684\u6700\u5c0f\u957f\u5ea6 \"\"\" boxes = plateaus ( self . _bar_close , min_size ) self . add_main_trace ( \"bbox\" , boxes = boxes ) mark_peaks_and_valleys ( self , up_thres = None , down_thres = None ) \u00b6 \u5728K\u7ebf\u56fe\u4e0a\u6807\u6ce8\u5cf0\u8c37\u70b9 Parameters: Name Type Description Default up_thres \u7528\u6765\u68c0\u6d4b\u5cf0\u8c37\u65f6\u4f7f\u7528\u7684\u9608\u503c\uff0c\u53c2\u89c1 omicron.talib.morph.peaks_and_valleys None down_thres \u7528\u6765\u68c0\u6d4b\u5cf0\u8c37\u65f6\u4f7f\u7528\u7684\u9608\u503c\uff0c\u53c2\u89c1 omicron.talib.morph.peaks_and_valleys None Source code in omicron/plotting/candlestick.py def mark_peaks_and_valleys ( self , up_thres : Optional [ float ] = None , down_thres : Optional [ float ] = None ): \"\"\"\u5728K\u7ebf\u56fe\u4e0a\u6807\u6ce8\u5cf0\u8c37\u70b9 Args: up_thres : \u7528\u6765\u68c0\u6d4b\u5cf0\u8c37\u65f6\u4f7f\u7528\u7684\u9608\u503c\uff0c\u53c2\u89c1[omicron.talib.morph.peaks_and_valleys][] down_thres : \u7528\u6765\u68c0\u6d4b\u5cf0\u8c37\u65f6\u4f7f\u7528\u7684\u9608\u503c\uff0c\u53c2\u89c1[omicron.talib.morph.peaks_and_valleys][] \"\"\" bars = self . bars flags = peaks_and_valleys ( self . _bar_close , up_thres , down_thres ) # \u79fb\u9664\u9996\u5c3e\u7684\u9876\u5e95\u6807\u8bb0\uff0c\u4e00\u822c\u60c5\u51b5\u4e0b\u5b83\u4eec\u90fd\u4e0d\u662f\u771f\u6b63\u7684\u9876\u548c\u5e95\u3002 flags [ 0 ] = 0 flags [ - 1 ] = 0 marker_margin = ( max ( bars [ \"high\" ]) - min ( bars [ \"low\" ])) * 0.05 ticks_up = self . ticks [ flags == 1 ] y_up = bars [ \"high\" ][ flags == 1 ] + marker_margin ticks_down = self . ticks [ flags == - 1 ] y_down = bars [ \"low\" ][ flags == - 1 ] - marker_margin trace = go . Scatter ( mode = \"markers\" , x = ticks_up , y = y_up , marker_symbol = \"triangle-down\" , name = \"\u5cf0\" ) self . main_traces [ \"peaks\" ] = trace trace = go . Scatter ( mode = \"markers\" , x = ticks_down , y = y_down , marker_symbol = \"triangle-up\" , name = \"\u8c37\" , ) self . main_traces [ \"valleys\" ] = trace mark_support_resist_lines ( self , upthres = None , downthres = None , use_close = True , win = 60 ) \u00b6 \u5728K\u7ebf\u56fe\u4e0a\u6807\u6ce8\u652f\u6491\u7ebf\u548c\u538b\u529b\u7ebf \u5728 win \u4e2ak\u7ebf\u5185\uff0c\u627e\u51fa\u6240\u6709\u7684\u5c40\u90e8\u5cf0\u8c37\u70b9\uff0c\u5e76\u4ee5\u6700\u9ad8\u7684\u4e24\u4e2a\u5cf0\u8fde\u7ebf\u751f\u6210\u538b\u529b\u7ebf\uff0c\u4ee5\u6700\u4f4e\u7684\u4e24\u4e2a\u8c37\u8fde\u7ebf\u751f\u6210\u652f\u6491\u7ebf\u3002 Parameters: Name Type Description Default upthres \u7528\u6765\u68c0\u6d4b\u5cf0\u8c37\u65f6\u4f7f\u7528\u7684\u9608\u503c\uff0c\u53c2\u89c1 omicron.talib.morph.peaks_and_valleys None downthres \u7528\u6765\u68c0\u6d4b\u5cf0\u8c37\u65f6\u4f7f\u7528\u7684\u9608\u503c\uff0c\u53c2\u89c1 omicron.talib.morph.peaks_and_valleys . 
None use_close \u662f\u5426\u4f7f\u7528\u6536\u76d8\u4ef7\u6765\u8fdb\u884c\u68c0\u6d4b\u3002\u5982\u679c\u4e3aFalse\uff0c\u5219\u4f7f\u7528high\u6765\u68c0\u6d4b\u538b\u529b\u7ebf\uff0c\u4f7f\u7528low\u6765\u68c0\u6d4b\u652f\u6491\u7ebf. True win \u68c0\u6d4b\u5c40\u90e8\u9ad8\u4f4e\u70b9\u7684\u7a97\u53e3. 60 Source code in omicron/plotting/candlestick.py def mark_support_resist_lines ( self , upthres : float = None , downthres : float = None , use_close = True , win = 60 ): \"\"\"\u5728K\u7ebf\u56fe\u4e0a\u6807\u6ce8\u652f\u6491\u7ebf\u548c\u538b\u529b\u7ebf \u5728`win`\u4e2ak\u7ebf\u5185\uff0c\u627e\u51fa\u6240\u6709\u7684\u5c40\u90e8\u5cf0\u8c37\u70b9\uff0c\u5e76\u4ee5\u6700\u9ad8\u7684\u4e24\u4e2a\u5cf0\u8fde\u7ebf\u751f\u6210\u538b\u529b\u7ebf\uff0c\u4ee5\u6700\u4f4e\u7684\u4e24\u4e2a\u8c37\u8fde\u7ebf\u751f\u6210\u652f\u6491\u7ebf\u3002 Args: upthres : \u7528\u6765\u68c0\u6d4b\u5cf0\u8c37\u65f6\u4f7f\u7528\u7684\u9608\u503c\uff0c\u53c2\u89c1`omicron.talib.morph.peaks_and_valleys` downthres : \u7528\u6765\u68c0\u6d4b\u5cf0\u8c37\u65f6\u4f7f\u7528\u7684\u9608\u503c\uff0c\u53c2\u89c1`omicron.talib.morph.peaks_and_valleys`. use_close : \u662f\u5426\u4f7f\u7528\u6536\u76d8\u4ef7\u6765\u8fdb\u884c\u68c0\u6d4b\u3002\u5982\u679c\u4e3aFalse\uff0c\u5219\u4f7f\u7528high\u6765\u68c0\u6d4b\u538b\u529b\u7ebf\uff0c\u4f7f\u7528low\u6765\u68c0\u6d4b\u652f\u6491\u7ebf. win : \u68c0\u6d4b\u5c40\u90e8\u9ad8\u4f4e\u70b9\u7684\u7a97\u53e3. \"\"\" bars = self . bars [ - win :] clipped = len ( self . bars ) - win if use_close : support , resist , x_start = support_resist_lines ( self . _bar_close , upthres , downthres ) x = np . arange ( len ( bars ))[ x_start :] self . add_main_trace ( \"support_line\" , x = x + clipped , y = support ( x )) self . add_main_trace ( \"resist_line\" , x = x + clipped , y = resist ( x )) else : # \u4f7f\u7528\"high\"\u548c\"low\" bars = self . bars [ - win :] support , _ , x_start = support_resist_lines ( bars [ \"low\" ], upthres , downthres ) x = np . arange ( len ( bars ))[ x_start :] self . add_main_trace ( \"support_line\" , x = x + clipped , y = support ( x )) _ , resist , x_start = support_resist_lines ( bars [ \"high\" ], upthres , downthres ) x = np . arange ( len ( bars ))[ x_start :] self . add_main_trace ( \"resist_line\" , x = x + clipped , y = resist ( x )) plot ( self ) \u00b6 \u7ed8\u5236\u56fe\u8868 Source code in omicron/plotting/candlestick.py def plot ( self ): \"\"\"\u7ed8\u5236\u56fe\u8868\"\"\" fig = self . figure fig . show ()","title":"CandleStick"},{"location":"api/plotting/candlestick/#omicron.plotting.candlestick--\u7528\u6cd5\u793a\u4f8b","text":"\u6ce8\u610f\u793a\u4f8b\u9700\u8981\u5728notebook\u4e2d\u8fd0\u884c\uff0c\u5426\u5219\u65e0\u6cd5\u751f\u6210\u56fe\u3002 1 2 3 4 5 from omicron.plotting.candlestick import Candlestick bars = await Stock . get_bars ( \"000001.XSHE\" , 120 , FrameType . DAY ) cs = Candlestick ( bars ) cs . plot () \u8fd9\u5c06\u751f\u6210\u4e0b\u56fe\uff1a \u9ed8\u8ba4\u5730\uff0c\u5c06\u663e\u793a\u6210\u4ea4\u91cf\u548cRSI\u6307\u6807\u4e24\u4e2a\u526f\u56fe\u3002\u53ef\u4ee5\u901a\u8fc7\u4ee5\u4e0b\u65b9\u5f0f\u6765\u5b9a\u5236\uff1a 1 2 3 4 5 cs = Candlestick ( bars , show_volume = True , show_rsi = True , show_peaks = False } cs . plot ()","title":"\u7528\u6cd5\u793a\u4f8b"},{"location":"api/plotting/candlestick/#omicron.plotting.candlestick--\u589e\u52a0\u6807\u8bb0","text":"1 2 3 4 5 6 7 8 9 10 11 from omicron.plotting.candlestick import Candlestick bars = await Stock . 
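The page shows an example for mark_bbox() but not for mark_support_resist_lines(); a minimal sketch, assuming the same notebook setup as the other examples and the 0.03/-0.03 thresholds used elsewhere in this class:

from omicron.plotting.candlestick import Candlestick

bars = await Stock.get_bars("000001.XSHE", 120, FrameType.DAY)
cs = Candlestick(bars, show_volume=False, show_rsi=False)
cs.mark_support_resist_lines(upthres=0.03, downthres=-0.03, use_close=True, win=60)
cs.plot()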
get_bars ( \"000001.XSHE\" , 120 , FrameType . DAY ) cs = Candlestick ( bars , show_volume = True , show_rsi = False , show_peaks = True ) cs . add_marks ([ 20 , 50 ]) cs . plot () \u8fd9\u5c06\u5728k\u7ebf\u4e0a\u663e\u793a\u4e24\u4e2a\u52a0\u53f7\uff1a","title":"\u589e\u52a0\u6807\u8bb0"},{"location":"api/plotting/candlestick/#omicron.plotting.candlestick--\u663e\u793a\u5e03\u6797\u5e26","text":"1 2 3 4 5 6 7 8 9 10 11 from omicron.plotting.candlestick import Candlestick bars = await Stock . get_bars ( \"000001.XSHE\" , 120 , FrameType . DAY ) cs = Candlestick ( bars , show_volume = True , show_rsi = False , show_peaks = True ) cs . add_indicator ( \"bbands\" , 20 ) cs . plot ()","title":"\u663e\u793a\u5e03\u6797\u5e26"},{"location":"api/plotting/candlestick/#omicron.plotting.candlestick--\u663e\u793a\u5e73\u53f0","text":"1 2 3 4 5 6 7 8 9 10 11 12 from omicron.plotting.candlestick import Candlestick bars = await Stock . get_bars ( \"000001.XSHE\" , 120 , FrameType . DAY ) cs = Candlestick ( bars , show_volume = True , show_rsi = False , show_peaks = True ) cs . mark_bbox () cs . plot ()","title":"\u663e\u793a\u5e73\u53f0"},{"location":"api/plotting/candlestick/#omicron.plotting.candlestick.Candlestick","text":"Source code in omicron/plotting/candlestick.py class Candlestick : RED = \"#FF4136\" GREEN = \"#3DAA70\" TRANSPARENT = \"rgba(0,0,0,0)\" LIGHT_GRAY = \"rgba(0, 0, 0, 0.1)\" MA_COLORS = { 5 : \"#1432F5\" , 10 : \"#EB52F7\" , 20 : \"#C0C0C0\" , 30 : \"#882111\" , 60 : \"#5E8E28\" , 120 : \"#4294F7\" , 250 : \"#F09937\" , } def __init__ ( self , bars : np . ndarray , ma_groups : List [ int ] = None , title : str = None , show_volume = True , show_rsi = True , show_peaks = False , width = None , height = None , ** kwargs , ): \"\"\"\u6784\u9020\u51fd\u6570 Args: bars: \u884c\u60c5\u6570\u636e ma_groups: \u5747\u7ebf\u7ec4\u53c2\u6570\u3002\u6bd4\u5982[5, 10, 20]\u8868\u660e\u5411k\u7ebf\u56fe\u4e2d\u6dfb\u52a05, 10, 20\u65e5\u5747\u7ebf\u3002\u5982\u679c\u4e0d\u63d0\u4f9b\uff0c\u5c06\u4ece\u6570\u7ec4[5, 10, 20, 30, 60, 120, 250]\u4e2d\u53d6\u76f4\u5230\u4e0e`len(bars) - 5`\u5339\u914d\u7684\u53c2\u6570\u4e3a\u6b62\u3002\u6bd4\u5982bars\u957f\u5ea6\u4e3a30\uff0c\u5219\u5c06\u53d6[5, 10, 20]\u6765\u7ed8\u5236\u5747\u7ebf\u3002 title: k\u7ebf\u56fe\u7684\u6807\u9898 show_volume: \u662f\u5426\u663e\u793a\u6210\u4ea4\u91cf\u56fe show_rsi: \u662f\u5426\u663e\u793aRSI\u56fe\u3002\u7f3a\u7701\u663e\u793a\u53c2\u6570\u4e3a6\u7684RSI\u56fe\u3002 show_peaks: \u662f\u5426\u6807\u8bb0\u68c0\u6d4b\u51fa\u6765\u7684\u5cf0\u8ddf\u8c37\u3002 width: the width in 'px' units of the figure height: the height in 'px' units of the figure Keyword Args: rsi_win int: default is 6 \"\"\" self . title = title self . bars = bars self . width = width self . height = height # traces for main area self . main_traces = {} # traces for indicator area self . ind_traces = {} self . ticks = self . _format_tick ( bars [ \"frame\" ]) self . _bar_close = array_math_round ( bars [ \"close\" ], 2 ) . astype ( np . float64 ) # for every candlestick, it must contain a candlestick plot cs = go . Candlestick ( x = self . ticks , open = bars [ \"open\" ], high = bars [ \"high\" ], low = bars [ \"low\" ], close = self . _bar_close , line = dict ({ \"width\" : 1 }), name = \"K\u7ebf\" , ** kwargs , ) # Set line and fill colors cs . increasing . fillcolor = \"rgba(255,255,255,0.9)\" cs . increasing . line . color = self . RED cs . decreasing . fillcolor = self . GREEN cs . decreasing . line . color = self . GREEN self . 
main_traces [ \"ohlc\" ] = cs if show_volume : self . add_indicator ( \"volume\" ) if show_peaks : self . add_main_trace ( \"peaks\" ) if show_rsi : self . add_indicator ( \"rsi\" , win = kwargs . get ( \"rsi_win\" , 6 )) # \u589e\u52a0\u5747\u7ebf if ma_groups is None : nbars = len ( bars ) if nbars < 9 : ma_groups = [] else : groups = np . array ([ 5 , 10 , 20 , 30 , 60 , 120 , 250 ]) idx = max ( np . argwhere ( groups < ( nbars - 5 ))) . item () + 1 ma_groups = groups [: idx ] for win in ma_groups : name = f \"ma { win } \" if win > len ( bars ): continue ma = moving_average ( self . _bar_close , win ) line = go . Scatter ( y = ma , x = self . ticks , name = name , line = dict ( width = 1 , color = self . MA_COLORS . get ( win )), ) self . main_traces [ name ] = line @property def figure ( self ): \"\"\"\u8fd4\u56de\u4e00\u4e2afigure\u5bf9\u8c61\"\"\" rows = len ( self . ind_traces ) + 1 specs = [[{ \"secondary_y\" : False }]] * rows specs [ 0 ][ 0 ][ \"secondary_y\" ] = True row_heights = [ 0.7 , * ([ 0.3 / ( rows - 1 )] * ( rows - 1 ))] print ( row_heights ) cols = 1 fig = make_subplots ( rows = rows , cols = cols , shared_xaxes = True , vertical_spacing = 0.1 , subplot_titles = ( self . title , * self . ind_traces . keys ()), row_heights = row_heights , specs = specs , ) for _ , trace in self . main_traces . items (): fig . add_trace ( trace , row = 1 , col = 1 ) for i , ( _ , trace ) in enumerate ( self . ind_traces . items ()): fig . add_trace ( trace , row = i + 2 , col = 1 ) ymin = np . min ( self . bars [ \"low\" ]) ymax = np . max ( self . bars [ \"high\" ]) ylim = [ ymin * 0.95 , ymax * 1.05 ] # \u663e\u793a\u5341\u5b57\u5149\u6807 fig . update_xaxes ( showgrid = False , showspikes = True , spikemode = \"across\" , spikesnap = \"cursor\" , spikecolor = \"grey\" , spikedash = \"solid\" , spikethickness = 1 , ) fig . update_yaxes ( showspikes = True , spikemode = \"across\" , spikesnap = \"cursor\" , spikedash = \"solid\" , spikecolor = \"grey\" , spikethickness = 1 , showgrid = True , gridcolor = self . LIGHT_GRAY , ) fig . update_xaxes ( nticks = len ( self . bars ) // 10 , ticklen = 10 , ticks = \"outside\" , minor = dict ( nticks = 5 , ticklen = 5 , ticks = \"outside\" ), row = rows , col = 1 , ) # \u8bbe\u7f6eK\u7ebf\u663e\u793a\u533a\u57df if self . width : win_size = int ( self . width // 10 ) else : win_size = 120 fig . update_xaxes ( type = \"category\" , range = [ len ( self . bars ) - win_size , len ( self . bars ) - 1 ] ) fig . update_layout ( yaxis = dict ( range = ylim ), hovermode = \"x unified\" , plot_bgcolor = self . TRANSPARENT , xaxis_rangeslider_visible = False , ) if self . width : fig . update_layout ( width = self . width ) if self . height : fig . update_layout ( height = self . height ) return fig def _format_tick ( self , tm : np . array ) -> NDArray : if tm . item ( 0 ) . hour == 0 : # assume it's date return np . array ( [ f \" { x . item () . year : 02 } - { x . item () . month : 02 } - { x . item () . day : 02 } \" for x in tm ] ) else : return np . array ( [ f \" { x . item () . month : 02 } - { x . item () . day : 02 } { x . item () . hour : 02 } : { x . item () . minute : 02 } \" for x in tm ] ) def _remove_ma ( self ): traces = {} for name in self . main_traces : if not name . startswith ( \"ma\" ): traces [ name ] = self . main_traces [ name ] self . 
main_traces = traces def add_main_trace ( self , trace_name : str , ** kwargs ): \"\"\"add trace to main plot \u652f\u6301\u7684\u56fe\u4f8b\u7c7b\u522b\u6709peaks, bbox\uff08bounding-box), bt(\u56de\u6d4b), support_line, resist_line Args: trace_name : \u56fe\u4f8b\u540d\u79f0 **kwargs : \u5176\u4ed6\u53c2\u6570 \"\"\" if trace_name == \"peaks\" : self . mark_peaks_and_valleys ( kwargs . get ( \"up_thres\" , 0.03 ), kwargs . get ( \"down_thres\" , - 0.03 ) ) # \u6807\u6ce8\u77e9\u5f62\u6846 elif trace_name == \"bbox\" : self . add_bounding_box ( kwargs . get ( \"boxes\" )) # \u56de\u6d4b\u7ed3\u679c elif trace_name == \"bt\" : self . add_backtest_result ( kwargs . get ( \"bt\" )) # \u589e\u52a0\u76f4\u7ebf elif trace_name == \"support_line\" : self . add_line ( \"\u652f\u6491\u7ebf\" , kwargs . get ( \"x\" ), kwargs . get ( \"y\" )) elif trace_name == \"resist_line\" : self . add_line ( \"\u538b\u529b\u7ebf\" , kwargs . get ( \"x\" ), kwargs . get ( \"y\" )) def add_line ( self , trace_name : str , x : List [ int ], y : List [ float ]): \"\"\"\u5728k\u7ebf\u56fe\u4e0a\u589e\u52a0\u4ee5`x`,`y`\u8868\u793a\u7684\u4e00\u6761\u76f4\u7ebf Args: trace_name : \u56fe\u4f8b\u540d\u79f0 x : x\u8f74\u5750\u6807\uff0c\u6240\u6709\u7684x\u503c\u90fd\u5fc5\u987b\u5c5e\u4e8e[0, len(self.bars)] y : y\u503c \"\"\" line = go . Scatter ( x = self . ticks [ x ], y = y , mode = \"lines\" , name = trace_name ) self . main_traces [ trace_name ] = line def mark_support_resist_lines ( self , upthres : float = None , downthres : float = None , use_close = True , win = 60 ): \"\"\"\u5728K\u7ebf\u56fe\u4e0a\u6807\u6ce8\u652f\u6491\u7ebf\u548c\u538b\u529b\u7ebf \u5728`win`\u4e2ak\u7ebf\u5185\uff0c\u627e\u51fa\u6240\u6709\u7684\u5c40\u90e8\u5cf0\u8c37\u70b9\uff0c\u5e76\u4ee5\u6700\u9ad8\u7684\u4e24\u4e2a\u5cf0\u8fde\u7ebf\u751f\u6210\u538b\u529b\u7ebf\uff0c\u4ee5\u6700\u4f4e\u7684\u4e24\u4e2a\u8c37\u8fde\u7ebf\u751f\u6210\u652f\u6491\u7ebf\u3002 Args: upthres : \u7528\u6765\u68c0\u6d4b\u5cf0\u8c37\u65f6\u4f7f\u7528\u7684\u9608\u503c\uff0c\u53c2\u89c1`omicron.talib.morph.peaks_and_valleys` downthres : \u7528\u6765\u68c0\u6d4b\u5cf0\u8c37\u65f6\u4f7f\u7528\u7684\u9608\u503c\uff0c\u53c2\u89c1`omicron.talib.morph.peaks_and_valleys`. use_close : \u662f\u5426\u4f7f\u7528\u6536\u76d8\u4ef7\u6765\u8fdb\u884c\u68c0\u6d4b\u3002\u5982\u679c\u4e3aFalse\uff0c\u5219\u4f7f\u7528high\u6765\u68c0\u6d4b\u538b\u529b\u7ebf\uff0c\u4f7f\u7528low\u6765\u68c0\u6d4b\u652f\u6491\u7ebf. win : \u68c0\u6d4b\u5c40\u90e8\u9ad8\u4f4e\u70b9\u7684\u7a97\u53e3. \"\"\" bars = self . bars [ - win :] clipped = len ( self . bars ) - win if use_close : support , resist , x_start = support_resist_lines ( self . _bar_close , upthres , downthres ) x = np . arange ( len ( bars ))[ x_start :] self . add_main_trace ( \"support_line\" , x = x + clipped , y = support ( x )) self . add_main_trace ( \"resist_line\" , x = x + clipped , y = resist ( x )) else : # \u4f7f\u7528\"high\"\u548c\"low\" bars = self . bars [ - win :] support , _ , x_start = support_resist_lines ( bars [ \"low\" ], upthres , downthres ) x = np . arange ( len ( bars ))[ x_start :] self . add_main_trace ( \"support_line\" , x = x + clipped , y = support ( x )) _ , resist , x_start = support_resist_lines ( bars [ \"high\" ], upthres , downthres ) x = np . arange ( len ( bars ))[ x_start :] self . 
add_main_trace ( \"resist_line\" , x = x + clipped , y = resist ( x )) def mark_bbox ( self , min_size : int = 20 ): \"\"\"\u5728k\u7ebf\u56fe\u4e0a\u68c0\u6d4b\u5e76\u6807\u6ce8\u77e9\u5f62\u6846 Args: min_size : \u77e9\u5f62\u6846\u7684\u6700\u5c0f\u957f\u5ea6 \"\"\" boxes = plateaus ( self . _bar_close , min_size ) self . add_main_trace ( \"bbox\" , boxes = boxes ) def mark_backtest_result ( self , result : dict ): \"\"\"\u6807\u8bb0\u4e70\u5356\u70b9\u548c\u56de\u6d4b\u6570\u636e TODO: \u6b64\u65b9\u6cd5\u53ef\u80fd\u672a\u4e0ebacktest\u8fd4\u56de\u503c\u540c\u6b65\u3002\u6b64\u5916\uff0c\u5728portofolio\u56de\u6d4b\u4e2d\uff0c\u4e0d\u53ef\u80fd\u5728k\u7ebf\u56fe\u4e2d\u4f7f\u7528\u6b64\u65b9\u6cd5\u3002 Args: points : \u4e70\u5356\u70b9\u7684\u5750\u6807\u3002 \"\"\" trades = result . get ( \"trades\" ) assets = result . get ( \"assets\" ) x , y , labels = [], [], [] hover = [] labels_color = defaultdict ( list ) for trade in trades : trade_date = arrow . get ( trade [ \"time\" ]) . date () asset = assets . get ( trade_date ) security = trade [ \"security\" ] price = trade [ \"price\" ] volume = trade [ \"volume\" ] side = trade [ \"order_side\" ] x . append ( self . _format_tick ( trade_date )) bar = self . bars [ self . bars [ \"frame\" ] == trade_date ] if side == \"\u4e70\u5165\" : hover . append ( f \"\u603b\u8d44\u4ea7: { asset }

{ side } : { security }
\u4e70\u5165\u4ef7: { price }
\u80a1\u6570: { volume } \" ) y . append ( bar [ \"high\" ][ 0 ] * 1.1 ) labels . append ( \"B\" ) labels_color [ \"color\" ] . append ( self . RED ) else : y . append ( bar [ \"low\" ][ 0 ] * 0.99 ) hover . append ( f \"\u603b\u8d44\u4ea7: { asset }

{ side } : { security }
\u5356\u51fa\u4ef7: { price }
\u80a1\u6570: { volume } \" ) labels . append ( \"S\" ) labels_color [ \"color\" ] . append ( self . GREEN ) # txt.append(f'{side}:{security}
\u5356\u51fa\u4ef7:{price}
\u80a1\u6570:{volume}') trace = go . Scatter ( x = x , y = y , mode = \"text\" , text = labels , name = \"backtest\" , hovertext = hover , textfont = labels_color , ) self . main_traces [ \"bs\" ] = trace def mark_peaks_and_valleys ( self , up_thres : Optional [ float ] = None , down_thres : Optional [ float ] = None ): \"\"\"\u5728K\u7ebf\u56fe\u4e0a\u6807\u6ce8\u5cf0\u8c37\u70b9 Args: up_thres : \u7528\u6765\u68c0\u6d4b\u5cf0\u8c37\u65f6\u4f7f\u7528\u7684\u9608\u503c\uff0c\u53c2\u89c1[omicron.talib.morph.peaks_and_valleys][] down_thres : \u7528\u6765\u68c0\u6d4b\u5cf0\u8c37\u65f6\u4f7f\u7528\u7684\u9608\u503c\uff0c\u53c2\u89c1[omicron.talib.morph.peaks_and_valleys][] \"\"\" bars = self . bars flags = peaks_and_valleys ( self . _bar_close , up_thres , down_thres ) # \u79fb\u9664\u9996\u5c3e\u7684\u9876\u5e95\u6807\u8bb0\uff0c\u4e00\u822c\u60c5\u51b5\u4e0b\u5b83\u4eec\u90fd\u4e0d\u662f\u771f\u6b63\u7684\u9876\u548c\u5e95\u3002 flags [ 0 ] = 0 flags [ - 1 ] = 0 marker_margin = ( max ( bars [ \"high\" ]) - min ( bars [ \"low\" ])) * 0.05 ticks_up = self . ticks [ flags == 1 ] y_up = bars [ \"high\" ][ flags == 1 ] + marker_margin ticks_down = self . ticks [ flags == - 1 ] y_down = bars [ \"low\" ][ flags == - 1 ] - marker_margin trace = go . Scatter ( mode = \"markers\" , x = ticks_up , y = y_up , marker_symbol = \"triangle-down\" , name = \"\u5cf0\" ) self . main_traces [ \"peaks\" ] = trace trace = go . Scatter ( mode = \"markers\" , x = ticks_down , y = y_down , marker_symbol = \"triangle-up\" , name = \"\u8c37\" , ) self . main_traces [ \"valleys\" ] = trace def add_bounding_box ( self , boxes : List [ Tuple ]): \"\"\"bbox\u662f\u6807\u8bb0\u5728k\u7ebf\u56fe\u4e0a\u67d0\u4e2a\u533a\u95f4\u5185\u7684\u77e9\u5f62\u6846\uff0c\u5b83\u4ee5\u8be5\u533a\u95f4\u6700\u9ad8\u4ef7\u548c\u6700\u4f4e\u4ef7\u4e3a\u4e0a\u4e0b\u8fb9\u3002 Args: boxes: \u6bcf\u4e2a\u5143\u7d20(start, width)\u8868\u793a\u5404\u4e2abbox\u7684\u8d77\u70b9\u548c\u5bbd\u5ea6\u3002 \"\"\" for j , box in enumerate ( boxes ): x , y = [], [] i , width = box if len ( x ): x . append ( None ) y . append ( None ) group = self . bars [ i : i + width ] mean = np . mean ( group [ \"close\" ]) std = 2 * np . std ( group [ \"close\" ]) # \u843d\u5728\u4e24\u4e2a\u6807\u51c6\u5dee\u4ee5\u5185\u7684\u5b9e\u4f53\u6700\u4e0a\u65b9\u548c\u6700\u4e0b\u65b9\u503c hc = np . max ( group [ group [ \"close\" ] < mean + std ][ \"close\" ]) lc = np . min ( group [ group [ \"close\" ] > mean - std ][ \"close\" ]) ho = np . max ( group [ group [ \"open\" ] < mean + std ][ \"open\" ]) lo = np . min ( group [ group [ \"open\" ] > mean - std ][ \"open\" ]) h = max ( hc , ho ) low = min ( lo , lc ) x . extend ( self . ticks [[ i , i + width - 1 , i + width - 1 , i , i ]]) y . extend (( h , h , low , low , h )) hover = f \"\u5bbd\u5ea6: { width }
\u632f\u5e45: { h / low - 1 : .2% } \" trace = go . Scatter ( x = x , y = y , fill = \"toself\" , name = f \"\u5e73\u53f0\u6574\u7406 { j } \" , text = hover ) self . main_traces [ f \"bbox- { j } \" ] = trace def add_indicator ( self , indicator : str , ** kwargs ): \"\"\"\u5411k\u7ebf\u56fe\u4e2d\u589e\u52a0\u6280\u672f\u6307\u6807 Args: indicator: \u5f53\u524d\u652f\u6301\u503c\u6709'volume', 'rsi', 'bbands' kwargs: \u8ba1\u7b97\u67d0\u4e2aindicator\u65f6\uff0c\u9700\u8981\u7684\u53c2\u6570\u3002\u6bd4\u5982\u8ba1\u7b97bbands\u65f6\uff0c\u9700\u8981\u4f20\u5165\u5747\u7ebf\u7684window \"\"\" if indicator == \"volume\" : colors = np . repeat ( self . RED , len ( self . bars )) colors [ self . bars [ \"close\" ] <= self . bars [ \"open\" ]] = self . GREEN trace = go . Bar ( x = self . ticks , y = self . bars [ \"volume\" ], showlegend = False , marker = { \"color\" : colors }, ) elif indicator == \"rsi\" : win = kwargs . get ( \"win\" ) rsi = talib . RSI ( self . _bar_close , win ) # type: ignore trace = go . Scatter ( x = self . ticks , y = rsi , showlegend = False ) elif indicator == \"bbands\" : self . _remove_ma () win = kwargs . get ( \"win\" ) for name , ind in zip ( [ \"bbands-high\" , \"bbands-mean\" , \"bbands-low\" ], talib . BBANDS ( self . _bar_close , win ), # type: ignore ): trace = go . Scatter ( x = self . ticks , y = ind , showlegend = True , name = name ) self . main_traces [ name ] = trace return else : raise ValueError ( f \" { indicator } not supported\" ) self . ind_traces [ indicator ] = trace def add_marks ( self , x : List [ int ], y : List [ float ], name : str , marker : str = \"cross\" , color : Optional [ str ] = None , ): \"\"\"\u5411k\u7ebf\u56fe\u4e2d\u589e\u52a0\u6807\u8bb0\u70b9\"\"\" trace = go . Scatter ( x = self . ticks [ x ], y = y , mode = \"markers\" , marker_symbol = marker , marker_color = color , name = name , ) self . main_traces [ name ] = trace def plot ( self ): \"\"\"\u7ed8\u5236\u56fe\u8868\"\"\" fig = self . figure fig . show ()","title":"Candlestick"},{"location":"api/plotting/candlestick/#omicron.plotting.candlestick.Candlestick.figure","text":"\u8fd4\u56de\u4e00\u4e2afigure\u5bf9\u8c61","title":"figure"},{"location":"api/plotting/candlestick/#omicron.plotting.candlestick.Candlestick.__init__","text":"\u6784\u9020\u51fd\u6570 Parameters: Name Type Description Default bars ndarray \u884c\u60c5\u6570\u636e required ma_groups List[int] \u5747\u7ebf\u7ec4\u53c2\u6570\u3002\u6bd4\u5982[5, 10, 20]\u8868\u660e\u5411k\u7ebf\u56fe\u4e2d\u6dfb\u52a05, 10, 20\u65e5\u5747\u7ebf\u3002\u5982\u679c\u4e0d\u63d0\u4f9b\uff0c\u5c06\u4ece\u6570\u7ec4[5, 10, 20, 30, 60, 120, 250]\u4e2d\u53d6\u76f4\u5230\u4e0e len(bars) - 5 \u5339\u914d\u7684\u53c2\u6570\u4e3a\u6b62\u3002\u6bd4\u5982bars\u957f\u5ea6\u4e3a30\uff0c\u5219\u5c06\u53d6[5, 10, 20]\u6765\u7ed8\u5236\u5747\u7ebf\u3002 None title str k\u7ebf\u56fe\u7684\u6807\u9898 None show_volume \u662f\u5426\u663e\u793a\u6210\u4ea4\u91cf\u56fe True show_rsi \u662f\u5426\u663e\u793aRSI\u56fe\u3002\u7f3a\u7701\u663e\u793a\u53c2\u6570\u4e3a6\u7684RSI\u56fe\u3002 True show_peaks \u662f\u5426\u6807\u8bb0\u68c0\u6d4b\u51fa\u6765\u7684\u5cf0\u8ddf\u8c37\u3002 False width the width in 'px' units of the figure None height the height in 'px' units of the figure None Keyword arguments: Name Type Description rsi_win int default is 6 Source code in omicron/plotting/candlestick.py def __init__ ( self , bars : np . 
ndarray , ma_groups : List [ int ] = None , title : str = None , show_volume = True , show_rsi = True , show_peaks = False , width = None , height = None , ** kwargs , ): \"\"\"\u6784\u9020\u51fd\u6570 Args: bars: \u884c\u60c5\u6570\u636e ma_groups: \u5747\u7ebf\u7ec4\u53c2\u6570\u3002\u6bd4\u5982[5, 10, 20]\u8868\u660e\u5411k\u7ebf\u56fe\u4e2d\u6dfb\u52a05, 10, 20\u65e5\u5747\u7ebf\u3002\u5982\u679c\u4e0d\u63d0\u4f9b\uff0c\u5c06\u4ece\u6570\u7ec4[5, 10, 20, 30, 60, 120, 250]\u4e2d\u53d6\u76f4\u5230\u4e0e`len(bars) - 5`\u5339\u914d\u7684\u53c2\u6570\u4e3a\u6b62\u3002\u6bd4\u5982bars\u957f\u5ea6\u4e3a30\uff0c\u5219\u5c06\u53d6[5, 10, 20]\u6765\u7ed8\u5236\u5747\u7ebf\u3002 title: k\u7ebf\u56fe\u7684\u6807\u9898 show_volume: \u662f\u5426\u663e\u793a\u6210\u4ea4\u91cf\u56fe show_rsi: \u662f\u5426\u663e\u793aRSI\u56fe\u3002\u7f3a\u7701\u663e\u793a\u53c2\u6570\u4e3a6\u7684RSI\u56fe\u3002 show_peaks: \u662f\u5426\u6807\u8bb0\u68c0\u6d4b\u51fa\u6765\u7684\u5cf0\u8ddf\u8c37\u3002 width: the width in 'px' units of the figure height: the height in 'px' units of the figure Keyword Args: rsi_win int: default is 6 \"\"\" self . title = title self . bars = bars self . width = width self . height = height # traces for main area self . main_traces = {} # traces for indicator area self . ind_traces = {} self . ticks = self . _format_tick ( bars [ \"frame\" ]) self . _bar_close = array_math_round ( bars [ \"close\" ], 2 ) . astype ( np . float64 ) # for every candlestick, it must contain a candlestick plot cs = go . Candlestick ( x = self . ticks , open = bars [ \"open\" ], high = bars [ \"high\" ], low = bars [ \"low\" ], close = self . _bar_close , line = dict ({ \"width\" : 1 }), name = \"K\u7ebf\" , ** kwargs , ) # Set line and fill colors cs . increasing . fillcolor = \"rgba(255,255,255,0.9)\" cs . increasing . line . color = self . RED cs . decreasing . fillcolor = self . GREEN cs . decreasing . line . color = self . GREEN self . main_traces [ \"ohlc\" ] = cs if show_volume : self . add_indicator ( \"volume\" ) if show_peaks : self . add_main_trace ( \"peaks\" ) if show_rsi : self . add_indicator ( \"rsi\" , win = kwargs . get ( \"rsi_win\" , 6 )) # \u589e\u52a0\u5747\u7ebf if ma_groups is None : nbars = len ( bars ) if nbars < 9 : ma_groups = [] else : groups = np . array ([ 5 , 10 , 20 , 30 , 60 , 120 , 250 ]) idx = max ( np . argwhere ( groups < ( nbars - 5 ))) . item () + 1 ma_groups = groups [: idx ] for win in ma_groups : name = f \"ma { win } \" if win > len ( bars ): continue ma = moving_average ( self . _bar_close , win ) line = go . Scatter ( y = ma , x = self . ticks , name = name , line = dict ( width = 1 , color = self . MA_COLORS . get ( win )), ) self . 
main_traces [ name ] = line","title":"__init__()"},{"location":"api/plotting/candlestick/#omicron.plotting.candlestick.Candlestick.add_bounding_box","text":"bbox\u662f\u6807\u8bb0\u5728k\u7ebf\u56fe\u4e0a\u67d0\u4e2a\u533a\u95f4\u5185\u7684\u77e9\u5f62\u6846\uff0c\u5b83\u4ee5\u8be5\u533a\u95f4\u6700\u9ad8\u4ef7\u548c\u6700\u4f4e\u4ef7\u4e3a\u4e0a\u4e0b\u8fb9\u3002 Parameters: Name Type Description Default boxes List[Tuple] \u6bcf\u4e2a\u5143\u7d20(start, width)\u8868\u793a\u5404\u4e2abbox\u7684\u8d77\u70b9\u548c\u5bbd\u5ea6\u3002 required Source code in omicron/plotting/candlestick.py def add_bounding_box ( self , boxes : List [ Tuple ]): \"\"\"bbox\u662f\u6807\u8bb0\u5728k\u7ebf\u56fe\u4e0a\u67d0\u4e2a\u533a\u95f4\u5185\u7684\u77e9\u5f62\u6846\uff0c\u5b83\u4ee5\u8be5\u533a\u95f4\u6700\u9ad8\u4ef7\u548c\u6700\u4f4e\u4ef7\u4e3a\u4e0a\u4e0b\u8fb9\u3002 Args: boxes: \u6bcf\u4e2a\u5143\u7d20(start, width)\u8868\u793a\u5404\u4e2abbox\u7684\u8d77\u70b9\u548c\u5bbd\u5ea6\u3002 \"\"\" for j , box in enumerate ( boxes ): x , y = [], [] i , width = box if len ( x ): x . append ( None ) y . append ( None ) group = self . bars [ i : i + width ] mean = np . mean ( group [ \"close\" ]) std = 2 * np . std ( group [ \"close\" ]) # \u843d\u5728\u4e24\u4e2a\u6807\u51c6\u5dee\u4ee5\u5185\u7684\u5b9e\u4f53\u6700\u4e0a\u65b9\u548c\u6700\u4e0b\u65b9\u503c hc = np . max ( group [ group [ \"close\" ] < mean + std ][ \"close\" ]) lc = np . min ( group [ group [ \"close\" ] > mean - std ][ \"close\" ]) ho = np . max ( group [ group [ \"open\" ] < mean + std ][ \"open\" ]) lo = np . min ( group [ group [ \"open\" ] > mean - std ][ \"open\" ]) h = max ( hc , ho ) low = min ( lo , lc ) x . extend ( self . ticks [[ i , i + width - 1 , i + width - 1 , i , i ]]) y . extend (( h , h , low , low , h )) hover = f \"\u5bbd\u5ea6: { width }
\u632f\u5e45: { h / low - 1 : .2% } \" trace = go . Scatter ( x = x , y = y , fill = \"toself\" , name = f \"\u5e73\u53f0\u6574\u7406 { j } \" , text = hover ) self . main_traces [ f \"bbox- { j } \" ] = trace","title":"add_bounding_box()"},{"location":"api/plotting/candlestick/#omicron.plotting.candlestick.Candlestick.add_indicator","text":"\u5411k\u7ebf\u56fe\u4e2d\u589e\u52a0\u6280\u672f\u6307\u6807 Parameters: Name Type Description Default indicator str \u5f53\u524d\u652f\u6301\u503c\u6709'volume', 'rsi', 'bbands' required kwargs \u8ba1\u7b97\u67d0\u4e2aindicator\u65f6\uff0c\u9700\u8981\u7684\u53c2\u6570\u3002\u6bd4\u5982\u8ba1\u7b97bbands\u65f6\uff0c\u9700\u8981\u4f20\u5165\u5747\u7ebf\u7684window {} Source code in omicron/plotting/candlestick.py def add_indicator ( self , indicator : str , ** kwargs ): \"\"\"\u5411k\u7ebf\u56fe\u4e2d\u589e\u52a0\u6280\u672f\u6307\u6807 Args: indicator: \u5f53\u524d\u652f\u6301\u503c\u6709'volume', 'rsi', 'bbands' kwargs: \u8ba1\u7b97\u67d0\u4e2aindicator\u65f6\uff0c\u9700\u8981\u7684\u53c2\u6570\u3002\u6bd4\u5982\u8ba1\u7b97bbands\u65f6\uff0c\u9700\u8981\u4f20\u5165\u5747\u7ebf\u7684window \"\"\" if indicator == \"volume\" : colors = np . repeat ( self . RED , len ( self . bars )) colors [ self . bars [ \"close\" ] <= self . bars [ \"open\" ]] = self . GREEN trace = go . Bar ( x = self . ticks , y = self . bars [ \"volume\" ], showlegend = False , marker = { \"color\" : colors }, ) elif indicator == \"rsi\" : win = kwargs . get ( \"win\" ) rsi = talib . RSI ( self . _bar_close , win ) # type: ignore trace = go . Scatter ( x = self . ticks , y = rsi , showlegend = False ) elif indicator == \"bbands\" : self . _remove_ma () win = kwargs . get ( \"win\" ) for name , ind in zip ( [ \"bbands-high\" , \"bbands-mean\" , \"bbands-low\" ], talib . BBANDS ( self . _bar_close , win ), # type: ignore ): trace = go . Scatter ( x = self . ticks , y = ind , showlegend = True , name = name ) self . main_traces [ name ] = trace return else : raise ValueError ( f \" { indicator } not supported\" ) self . ind_traces [ indicator ] = trace","title":"add_indicator()"},{"location":"api/plotting/candlestick/#omicron.plotting.candlestick.Candlestick.add_line","text":"\u5728k\u7ebf\u56fe\u4e0a\u589e\u52a0\u4ee5 x , y \u8868\u793a\u7684\u4e00\u6761\u76f4\u7ebf Parameters: Name Type Description Default trace_name \u56fe\u4f8b\u540d\u79f0 required x x\u8f74\u5750\u6807\uff0c\u6240\u6709\u7684x\u503c\u90fd\u5fc5\u987b\u5c5e\u4e8e[0, len(self.bars)] required y y\u503c required Source code in omicron/plotting/candlestick.py def add_line ( self , trace_name : str , x : List [ int ], y : List [ float ]): \"\"\"\u5728k\u7ebf\u56fe\u4e0a\u589e\u52a0\u4ee5`x`,`y`\u8868\u793a\u7684\u4e00\u6761\u76f4\u7ebf Args: trace_name : \u56fe\u4f8b\u540d\u79f0 x : x\u8f74\u5750\u6807\uff0c\u6240\u6709\u7684x\u503c\u90fd\u5fc5\u987b\u5c5e\u4e8e[0, len(self.bars)] y : y\u503c \"\"\" line = go . Scatter ( x = self . ticks [ x ], y = y , mode = \"lines\" , name = trace_name ) self . 
main_traces [ trace_name ] = line","title":"add_line()"},{"location":"api/plotting/candlestick/#omicron.plotting.candlestick.Candlestick.add_main_trace","text":"add trace to main plot \u652f\u6301\u7684\u56fe\u4f8b\u7c7b\u522b\u6709peaks, bbox\uff08bounding-box), bt(\u56de\u6d4b), support_line, resist_line Parameters: Name Type Description Default trace_name \u56fe\u4f8b\u540d\u79f0 required **kwargs \u5176\u4ed6\u53c2\u6570 {} Source code in omicron/plotting/candlestick.py def add_main_trace ( self , trace_name : str , ** kwargs ): \"\"\"add trace to main plot \u652f\u6301\u7684\u56fe\u4f8b\u7c7b\u522b\u6709peaks, bbox\uff08bounding-box), bt(\u56de\u6d4b), support_line, resist_line Args: trace_name : \u56fe\u4f8b\u540d\u79f0 **kwargs : \u5176\u4ed6\u53c2\u6570 \"\"\" if trace_name == \"peaks\" : self . mark_peaks_and_valleys ( kwargs . get ( \"up_thres\" , 0.03 ), kwargs . get ( \"down_thres\" , - 0.03 ) ) # \u6807\u6ce8\u77e9\u5f62\u6846 elif trace_name == \"bbox\" : self . add_bounding_box ( kwargs . get ( \"boxes\" )) # \u56de\u6d4b\u7ed3\u679c elif trace_name == \"bt\" : self . add_backtest_result ( kwargs . get ( \"bt\" )) # \u589e\u52a0\u76f4\u7ebf elif trace_name == \"support_line\" : self . add_line ( \"\u652f\u6491\u7ebf\" , kwargs . get ( \"x\" ), kwargs . get ( \"y\" )) elif trace_name == \"resist_line\" : self . add_line ( \"\u538b\u529b\u7ebf\" , kwargs . get ( \"x\" ), kwargs . get ( \"y\" ))","title":"add_main_trace()"},{"location":"api/plotting/candlestick/#omicron.plotting.candlestick.Candlestick.add_marks","text":"\u5411k\u7ebf\u56fe\u4e2d\u589e\u52a0\u6807\u8bb0\u70b9 Source code in omicron/plotting/candlestick.py def add_marks ( self , x : List [ int ], y : List [ float ], name : str , marker : str = \"cross\" , color : Optional [ str ] = None , ): \"\"\"\u5411k\u7ebf\u56fe\u4e2d\u589e\u52a0\u6807\u8bb0\u70b9\"\"\" trace = go . Scatter ( x = self . ticks [ x ], y = y , mode = \"markers\" , marker_symbol = marker , marker_color = color , name = name , ) self . main_traces [ name ] = trace","title":"add_marks()"},{"location":"api/plotting/candlestick/#omicron.plotting.candlestick.Candlestick.mark_backtest_result","text":"\u6807\u8bb0\u4e70\u5356\u70b9\u548c\u56de\u6d4b\u6570\u636e Todo \u6b64\u65b9\u6cd5\u53ef\u80fd\u672a\u4e0ebacktest\u8fd4\u56de\u503c\u540c\u6b65\u3002\u6b64\u5916\uff0c\u5728portofolio\u56de\u6d4b\u4e2d\uff0c\u4e0d\u53ef\u80fd\u5728k\u7ebf\u56fe\u4e2d\u4f7f\u7528\u6b64\u65b9\u6cd5\u3002 Parameters: Name Type Description Default points \u4e70\u5356\u70b9\u7684\u5750\u6807\u3002 required Source code in omicron/plotting/candlestick.py def mark_backtest_result ( self , result : dict ): \"\"\"\u6807\u8bb0\u4e70\u5356\u70b9\u548c\u56de\u6d4b\u6570\u636e TODO: \u6b64\u65b9\u6cd5\u53ef\u80fd\u672a\u4e0ebacktest\u8fd4\u56de\u503c\u540c\u6b65\u3002\u6b64\u5916\uff0c\u5728portofolio\u56de\u6d4b\u4e2d\uff0c\u4e0d\u53ef\u80fd\u5728k\u7ebf\u56fe\u4e2d\u4f7f\u7528\u6b64\u65b9\u6cd5\u3002 Args: points : \u4e70\u5356\u70b9\u7684\u5750\u6807\u3002 \"\"\" trades = result . get ( \"trades\" ) assets = result . get ( \"assets\" ) x , y , labels = [], [], [] hover = [] labels_color = defaultdict ( list ) for trade in trades : trade_date = arrow . get ( trade [ \"time\" ]) . date () asset = assets . get ( trade_date ) security = trade [ \"security\" ] price = trade [ \"price\" ] volume = trade [ \"volume\" ] side = trade [ \"order_side\" ] x . append ( self . _format_tick ( trade_date )) bar = self . bars [ self . 
bars [ \"frame\" ] == trade_date ] if side == \"\u4e70\u5165\" : hover . append ( f \"\u603b\u8d44\u4ea7: { asset }
<br><br>{ side } : { security }<br>\u4e70\u5165\u4ef7: { price }<br>
\u80a1\u6570: { volume } \" ) y . append ( bar [ \"high\" ][ 0 ] * 1.1 ) labels . append ( \"B\" ) labels_color [ \"color\" ] . append ( self . RED ) else : y . append ( bar [ \"low\" ][ 0 ] * 0.99 ) hover . append ( f \"\u603b\u8d44\u4ea7: { asset }
<br><br>{ side } : { security }<br>\u5356\u51fa\u4ef7: { price }<br>
\u80a1\u6570: { volume } \" ) labels . append ( \"S\" ) labels_color [ \"color\" ] . append ( self . GREEN ) labels_color . append ( self . GREEN ) # txt.append(f'{side}:{security}
<br>\u5356\u51fa\u4ef7:{price}<br>
\u80a1\u6570:{volume}') trace = go . Scatter ( x = x , y = y , mode = \"text\" , text = labels , name = \"backtest\" , hovertext = hover , textfont = labels_color , ) self . main_traces [ \"bs\" ] = trace","title":"mark_backtest_result()"},{"location":"api/plotting/candlestick/#omicron.plotting.candlestick.Candlestick.mark_bbox","text":"\u5728k\u7ebf\u56fe\u4e0a\u68c0\u6d4b\u5e76\u6807\u6ce8\u77e9\u5f62\u6846 Parameters: Name Type Description Default min_size \u77e9\u5f62\u6846\u7684\u6700\u5c0f\u957f\u5ea6 20 Source code in omicron/plotting/candlestick.py def mark_bbox ( self , min_size : int = 20 ): \"\"\"\u5728k\u7ebf\u56fe\u4e0a\u68c0\u6d4b\u5e76\u6807\u6ce8\u77e9\u5f62\u6846 Args: min_size : \u77e9\u5f62\u6846\u7684\u6700\u5c0f\u957f\u5ea6 \"\"\" boxes = plateaus ( self . _bar_close , min_size ) self . add_main_trace ( \"bbox\" , boxes = boxes )","title":"mark_bbox()"},{"location":"api/plotting/candlestick/#omicron.plotting.candlestick.Candlestick.mark_peaks_and_valleys","text":"\u5728K\u7ebf\u56fe\u4e0a\u6807\u6ce8\u5cf0\u8c37\u70b9 Parameters: Name Type Description Default up_thres \u7528\u6765\u68c0\u6d4b\u5cf0\u8c37\u65f6\u4f7f\u7528\u7684\u9608\u503c\uff0c\u53c2\u89c1 omicron.talib.morph.peaks_and_valleys None down_thres \u7528\u6765\u68c0\u6d4b\u5cf0\u8c37\u65f6\u4f7f\u7528\u7684\u9608\u503c\uff0c\u53c2\u89c1 omicron.talib.morph.peaks_and_valleys None Source code in omicron/plotting/candlestick.py def mark_peaks_and_valleys ( self , up_thres : Optional [ float ] = None , down_thres : Optional [ float ] = None ): \"\"\"\u5728K\u7ebf\u56fe\u4e0a\u6807\u6ce8\u5cf0\u8c37\u70b9 Args: up_thres : \u7528\u6765\u68c0\u6d4b\u5cf0\u8c37\u65f6\u4f7f\u7528\u7684\u9608\u503c\uff0c\u53c2\u89c1[omicron.talib.morph.peaks_and_valleys][] down_thres : \u7528\u6765\u68c0\u6d4b\u5cf0\u8c37\u65f6\u4f7f\u7528\u7684\u9608\u503c\uff0c\u53c2\u89c1[omicron.talib.morph.peaks_and_valleys][] \"\"\" bars = self . bars flags = peaks_and_valleys ( self . _bar_close , up_thres , down_thres ) # \u79fb\u9664\u9996\u5c3e\u7684\u9876\u5e95\u6807\u8bb0\uff0c\u4e00\u822c\u60c5\u51b5\u4e0b\u5b83\u4eec\u90fd\u4e0d\u662f\u771f\u6b63\u7684\u9876\u548c\u5e95\u3002 flags [ 0 ] = 0 flags [ - 1 ] = 0 marker_margin = ( max ( bars [ \"high\" ]) - min ( bars [ \"low\" ])) * 0.05 ticks_up = self . ticks [ flags == 1 ] y_up = bars [ \"high\" ][ flags == 1 ] + marker_margin ticks_down = self . ticks [ flags == - 1 ] y_down = bars [ \"low\" ][ flags == - 1 ] - marker_margin trace = go . Scatter ( mode = \"markers\" , x = ticks_up , y = y_up , marker_symbol = \"triangle-down\" , name = \"\u5cf0\" ) self . main_traces [ \"peaks\" ] = trace trace = go . Scatter ( mode = \"markers\" , x = ticks_down , y = y_down , marker_symbol = \"triangle-up\" , name = \"\u8c37\" , ) self . 
main_traces [ \"valleys\" ] = trace","title":"mark_peaks_and_valleys()"},{"location":"api/plotting/candlestick/#omicron.plotting.candlestick.Candlestick.mark_support_resist_lines","text":"\u5728K\u7ebf\u56fe\u4e0a\u6807\u6ce8\u652f\u6491\u7ebf\u548c\u538b\u529b\u7ebf \u5728 win \u4e2ak\u7ebf\u5185\uff0c\u627e\u51fa\u6240\u6709\u7684\u5c40\u90e8\u5cf0\u8c37\u70b9\uff0c\u5e76\u4ee5\u6700\u9ad8\u7684\u4e24\u4e2a\u5cf0\u8fde\u7ebf\u751f\u6210\u538b\u529b\u7ebf\uff0c\u4ee5\u6700\u4f4e\u7684\u4e24\u4e2a\u8c37\u8fde\u7ebf\u751f\u6210\u652f\u6491\u7ebf\u3002 Parameters: Name Type Description Default upthres \u7528\u6765\u68c0\u6d4b\u5cf0\u8c37\u65f6\u4f7f\u7528\u7684\u9608\u503c\uff0c\u53c2\u89c1 omicron.talib.morph.peaks_and_valleys None downthres \u7528\u6765\u68c0\u6d4b\u5cf0\u8c37\u65f6\u4f7f\u7528\u7684\u9608\u503c\uff0c\u53c2\u89c1 omicron.talib.morph.peaks_and_valleys . None use_close \u662f\u5426\u4f7f\u7528\u6536\u76d8\u4ef7\u6765\u8fdb\u884c\u68c0\u6d4b\u3002\u5982\u679c\u4e3aFalse\uff0c\u5219\u4f7f\u7528high\u6765\u68c0\u6d4b\u538b\u529b\u7ebf\uff0c\u4f7f\u7528low\u6765\u68c0\u6d4b\u652f\u6491\u7ebf. True win \u68c0\u6d4b\u5c40\u90e8\u9ad8\u4f4e\u70b9\u7684\u7a97\u53e3. 60 Source code in omicron/plotting/candlestick.py def mark_support_resist_lines ( self , upthres : float = None , downthres : float = None , use_close = True , win = 60 ): \"\"\"\u5728K\u7ebf\u56fe\u4e0a\u6807\u6ce8\u652f\u6491\u7ebf\u548c\u538b\u529b\u7ebf \u5728`win`\u4e2ak\u7ebf\u5185\uff0c\u627e\u51fa\u6240\u6709\u7684\u5c40\u90e8\u5cf0\u8c37\u70b9\uff0c\u5e76\u4ee5\u6700\u9ad8\u7684\u4e24\u4e2a\u5cf0\u8fde\u7ebf\u751f\u6210\u538b\u529b\u7ebf\uff0c\u4ee5\u6700\u4f4e\u7684\u4e24\u4e2a\u8c37\u8fde\u7ebf\u751f\u6210\u652f\u6491\u7ebf\u3002 Args: upthres : \u7528\u6765\u68c0\u6d4b\u5cf0\u8c37\u65f6\u4f7f\u7528\u7684\u9608\u503c\uff0c\u53c2\u89c1`omicron.talib.morph.peaks_and_valleys` downthres : \u7528\u6765\u68c0\u6d4b\u5cf0\u8c37\u65f6\u4f7f\u7528\u7684\u9608\u503c\uff0c\u53c2\u89c1`omicron.talib.morph.peaks_and_valleys`. use_close : \u662f\u5426\u4f7f\u7528\u6536\u76d8\u4ef7\u6765\u8fdb\u884c\u68c0\u6d4b\u3002\u5982\u679c\u4e3aFalse\uff0c\u5219\u4f7f\u7528high\u6765\u68c0\u6d4b\u538b\u529b\u7ebf\uff0c\u4f7f\u7528low\u6765\u68c0\u6d4b\u652f\u6491\u7ebf. win : \u68c0\u6d4b\u5c40\u90e8\u9ad8\u4f4e\u70b9\u7684\u7a97\u53e3. \"\"\" bars = self . bars [ - win :] clipped = len ( self . bars ) - win if use_close : support , resist , x_start = support_resist_lines ( self . _bar_close , upthres , downthres ) x = np . arange ( len ( bars ))[ x_start :] self . add_main_trace ( \"support_line\" , x = x + clipped , y = support ( x )) self . add_main_trace ( \"resist_line\" , x = x + clipped , y = resist ( x )) else : # \u4f7f\u7528\"high\"\u548c\"low\" bars = self . bars [ - win :] support , _ , x_start = support_resist_lines ( bars [ \"low\" ], upthres , downthres ) x = np . arange ( len ( bars ))[ x_start :] self . add_main_trace ( \"support_line\" , x = x + clipped , y = support ( x )) _ , resist , x_start = support_resist_lines ( bars [ \"high\" ], upthres , downthres ) x = np . arange ( len ( bars ))[ x_start :] self . add_main_trace ( \"resist_line\" , x = x + clipped , y = resist ( x ))","title":"mark_support_resist_lines()"},{"location":"api/plotting/candlestick/#omicron.plotting.candlestick.Candlestick.plot","text":"\u7ed8\u5236\u56fe\u8868 Source code in omicron/plotting/candlestick.py def plot ( self ): \"\"\"\u7ed8\u5236\u56fe\u8868\"\"\" fig = self . figure fig . 
show ()","title":"plot()"},{"location":"api/plotting/metrics/","text":"\u7ed8\u5236\u56de\u6d4b\u8d44\u4ea7\u66f2\u7ebf\u548c\u6307\u6807\u56fe\u3002 \u793a\u4f8b: 1 2 3 4 5 6 from omicron.plotting import MetricsGraph # calling some strategy's backtest and get bills/metrics mg = MetricsGraph ( bills , metrics ) await mg . plot () \u6ce8\u610f\u6b64\u65b9\u6cd5\u9700\u8981\u5728notebook\u4e2d\u8c03\u7528\u3002 MetricsGraph \u00b6 Source code in omicron/plotting/metrics.py class MetricsGraph : def __init__ ( self , bills : dict , metrics : dict , baseline_code : str = \"399300.XSHE\" , indicator : Optional [ pd . DataFrame ] = None , ): \"\"\" Args: bills: \u56de\u6d4b\u751f\u6210\u7684\u8d26\u5355\uff0c\u901a\u8fc7Strategy.bills\u83b7\u5f97 metrics: \u56de\u6d4b\u751f\u6210\u7684\u6307\u6807\uff0c\u901a\u8fc7strategy.metrics\u83b7\u5f97 baseline_code: \u57fa\u51c6\u8bc1\u5238\u4ee3\u7801 indicator: \u56de\u6d4b\u65f6\u4f7f\u7528\u7684\u6307\u6807\u3002\u5982\u679c\u5b58\u5728\uff0c\u5c06\u53e0\u52a0\u5230\u7b56\u7565\u56de\u6d4b\u56fe\u4e0a\u3002\u5b83\u5e94\u8be5\u662f\u4e00\u4e2a\u4ee5\u65e5\u671f\u4e3a\u7d22\u5f15\uff0c\u6307\u6807\u503c\u5217\u540d\u4e3a\"value\"\u7684pandas.DataFrame\u3002\u5982\u679c\u4e0d\u63d0\u4f9b\uff0c\u5c06\u4e0d\u4f1a\u7ed8\u5236\u6307\u6807\u56fe \"\"\" self . metrics = metrics self . trades = bills [ \"trades\" ] self . positions = bills [ \"positions\" ] self . start = arrow . get ( bills [ \"assets\" ][ 0 ][ 0 ]) . date () self . end = arrow . get ( bills [ \"assets\" ][ - 1 ][ 0 ]) . date () self . frames = [ tf . int2date ( f ) for f in tf . get_frames ( self . start , self . end , FrameType . DAY ) ] if indicator is not None : self . indicator = indicator . join ( pd . Series ( index = self . frames , name = \"frames\" , dtype = np . float64 ), how = \"right\" , ) else : self . indicator = None # \u8bb0\u5f55\u65e5\u671f\u5230\u4e0b\u6807\u7684\u53cd\u5411\u6620\u5c04 self . _frame2pos = { f : i for i , f in enumerate ( self . frames )} self . ticks = self . _format_tick ( self . frames ) # TODO: there's bug in backtesting, temporarily fix here df = pd . DataFrame ( self . frames , columns = [ \"frame\" ]) df [ \"assets\" ] = np . nan assets = pd . DataFrame ( bills [ \"assets\" ], columns = [ \"frame\" , \"assets\" ]) df [ \"assets\" ] = assets [ \"assets\" ] self . assets = df . fillna ( method = \"ffill\" )[ \"assets\" ] . to_numpy () self . nv = self . assets / self . assets [ 0 ] self . baseline_code = baseline_code or \"399300.XSHE\" def _fill_missing_prices ( self , bars : BarsArray , frames : Union [ List , NDArray ]): \"\"\"\u5c06bars\u4e2d\u7f3a\u5931\u503c\u91c7\u7528\u5176\u524d\u503c\u66ff\u6362 \u5f53baseline\u4e3a\u4e2a\u80a1\u65f6\uff0c\u53ef\u80fd\u5b58\u5728\u505c\u724c\u7684\u60c5\u51b5\uff0c\u8fd9\u6837\u5bfc\u81f4\u7531\u6b64\u8ba1\u7b97\u7684\u53c2\u8003\u6536\u76ca\u65e0\u6cd5\u4e0e\u56de\u6d4b\u7684\u8d44\u4ea7\u6536\u76ca\u5bf9\u9f50\uff0c\u56e0\u6b64\u9700\u8981\u8fdb\u884c\u8c03\u6574\u3002 \u51fa\u4e8e\u8fd9\u4e2a\u76ee\u7684\uff0c\u672c\u51fd\u6570\u53ea\u8fd4\u56de\u5904\u7406\u540e\u7684\u6536\u76d8\u4ef7\u3002 Args: bars: \u57fa\u7ebf\u884c\u60c5\u6570\u636e\u3002 frames: \u65e5\u671f\u7d22\u5f15 Returns: \u8865\u5145\u7f3a\u5931\u503c\u540e\u7684\u6536\u76d8\u4ef7\u5e8f\u5217 \"\"\" _close = pd . DataFrame ( { \"close\" : pd . Series ( bars [ \"close\" ], index = bars [ \"frame\" ]), \"frame\" : pd . Series ( np . empty (( len ( frames ),)), index = frames ), } )[ \"close\" ] . 
to_numpy () # \u8fd9\u91cc\u4f7f\u7528omicron\u4e2d\u7684fill_nan\uff0c\u662f\u56e0\u4e3a\u5982\u679c\u6570\u7ec4\u7684\u7b2c\u4e00\u4e2a\u5143\u7d20\u5373\u4e3aNaN\u7684\u8bdd\uff0c\u90a3\u4e48DataFrame.fillna(method='ffill')\u5c06\u65e0\u6cd5\u5904\u7406\u8fd9\u6837\u7684\u60c5\u51b5(\u4ecd\u7136\u4fdd\u6301\u4e3anan) return fill_nan ( _close ) def _format_tick ( self , frames : Union [ Frame , List [ Frame ]]) -> Union [ str , NDArray ]: if type ( frames ) == datetime . date : x = frames return f \" { x . year : 02 } - { x . month : 02 } - { x . day : 02 } \" elif type ( frames ) == datetime . datetime : x = frames return f \" { x . month : 02 } - { x . day : 02 } { x . hour : 02 } : { x . minute : 02 } \" elif type ( frames [ 0 ]) == datetime . date : # type: ignore return np . array ([ f \" { x . year : 02 } - { x . month : 02 } - { x . day : 02 } \" for x in frames ]) else : return np . array ( [ f \" { x . month : 02 } - { x . day : 02 } { x . hour : 02 } : { x . minute : 02 } \" for x in frames ] # type: ignore ) async def _metrics_trace ( self ): metric_names = { \"start\" : \"\u8d77\u59cb\u65e5\" , \"end\" : \"\u7ed3\u675f\u65e5\" , \"window\" : \"\u8d44\u4ea7\u66b4\u9732\u7a97\u53e3\" , \"total_tx\" : \"\u4ea4\u6613\u6b21\u6570\" , \"total_profit\" : \"\u603b\u5229\u6da6\" , \"total_profit_rate\" : \"\u5229\u6da6\u7387\" , \"win_rate\" : \"\u80dc\u7387\" , \"mean_return\" : \"\u65e5\u5747\u56de\u62a5\" , \"sharpe\" : \"\u590f\u666e\u7387\" , \"max_drawdown\" : \"\u6700\u5927\u56de\u64a4\" , \"annual_return\" : \"\u5e74\u5316\u56de\u62a5\" , \"volatility\" : \"\u6ce2\u52a8\u7387\" , \"sortino\" : \"sortino\" , \"calmar\" : \"calmar\" , } # bug: plotly go.Table.Cells format not work here metric_formatter = { \"start\" : \" {} \" , \"end\" : \" {} \" , \"window\" : \" {} \" , \"total_tx\" : \" {} \" , \"total_profit\" : \" {:.2f} \" , \"total_profit_rate\" : \" {:.2%} \" , \"win_rate\" : \" {:.2%} \" , \"mean_return\" : \" {:.2%} \" , \"sharpe\" : \" {:.2f} \" , \"max_drawdown\" : \" {:.2%} \" , \"annual_return\" : \" {:.2%} \" , \"volatility\" : \" {:.2%} \" , \"sortino\" : \" {:.2f} \" , \"calmar\" : \" {:.2f} \" , } metrics = deepcopy ( self . metrics ) baseline = metrics [ \"baseline\" ] or {} del metrics [ \"baseline\" ] baseline_name = ( await Security . alias ( self . baseline_code ) if self . baseline_code else \"\u57fa\u51c6\" ) metrics_formatted = [] for k in metric_names . keys (): if metrics . get ( k ): metrics_formatted . append ( metric_formatter [ k ] . format ( metrics . get ( k ))) else : metrics_formatted . append ( \"-\" ) baseline_formatted = [] for k in metric_names . keys (): if baseline . get ( k ): baseline_formatted . append ( metric_formatter [ k ] . format ( baseline . get ( k ))) else : baseline_formatted . append ( \"-\" ) return go . Table ( header = dict ( values = [ \"\u6307\u6807\u540d\" , \"\u7b56\u7565\" , baseline_name ]), cells = dict ( values = [ [ v for _ , v in metric_names . items ()], metrics_formatted , baseline_formatted , ], font_size = 10 , ), ) async def _trade_info_trace ( self ): \"\"\"\u6784\u5efahover text \u5e8f\u5217\"\"\" # convert trades into hover_info buys = defaultdict ( list ) sells = defaultdict ( list ) for _ , trade in self . trades . items (): trade_date = arrow . get ( trade [ \"time\" ]) . date () ipos = self . _frame2pos . get ( trade_date ) if ipos is None : logger . warning ( \"date %s in trade record not in backtest range\" , trade_date ) continue name = await Security . 
alias ( trade [ \"security\" ]) price = trade [ \"price\" ] side = trade [ \"order_side\" ] filled = trade [ \"filled\" ] trade_text = f \" { side } : { name } { filled / 100 : .0f } \u624b \u4ef7\u683c: { price : .02f } \u6210\u4ea4\u989d: { filled * price / 10000 : .1f } \u4e07\" if side == \"\u5356\u51fa\" : sells [ trade_date ] . append ( trade_text ) elif side in ( \"\u4e70\u5165\" , \"\u5206\u7ea2\u914d\u80a1\" ): buys [ trade_date ] . append ( trade_text ) X_buy , Y_buy , data_buy = [], [], [] X_sell , Y_sell , data_sell = [], [], [] for dt , text in buys . items (): ipos = self . _frame2pos . get ( dt ) Y_buy . append ( self . nv [ ipos ]) X_buy . append ( self . _format_tick ( dt )) asset = self . assets [ ipos ] hover = f \"\u8d44\u4ea7: { asset / 10000 : .1f } \u4e07
<br>{ '<br>
' . join ( text ) } \" data_buy . append ( hover ) trace_buy = go . Scatter ( x = X_buy , y = Y_buy , mode = \"markers\" , text = data_buy , name = \"\u4e70\u5165\u6210\u4ea4\" , marker = dict ( color = \"red\" , symbol = \"triangle-up\" ), hovertemplate = \"
% {text} \" , ) for dt , text in sells . items (): ipos = self . _frame2pos . get ( dt ) Y_sell . append ( self . nv [ ipos ]) X_sell . append ( self . _format_tick ( dt )) asset = self . assets [ ipos ] hover = f \"\u8d44\u4ea7: { asset / 10000 : .1f } \u4e07
<br>{ '<br>
' . join ( text ) } \" data_sell . append ( hover ) trace_sell = go . Scatter ( x = X_sell , y = Y_sell , mode = \"markers\" , text = data_sell , name = \"\u5356\u51fa\u6210\u4ea4\" , marker = dict ( color = \"green\" , symbol = \"triangle-down\" ), hovertemplate = \"
% {text} \" , ) return trace_buy , trace_sell async def plot ( self ): \"\"\"\u7ed8\u5236\u8d44\u4ea7\u66f2\u7ebf\u53ca\u56de\u6d4b\u6307\u6807\u56fe\"\"\" n = len ( self . assets ) bars = await Stock . get_bars ( self . baseline_code , n , FrameType . DAY , self . end ) baseline_prices = self . _fill_missing_prices ( bars , self . frames ) baseline_prices /= baseline_prices [ 0 ] fig = make_subplots ( rows = 1 , cols = 2 , shared_xaxes = False , specs = [ [{ \"secondary_y\" : True }, { \"type\" : \"table\" }], ], column_width = [ 0.75 , 0.25 ], horizontal_spacing = 0.01 , subplot_titles = ( \"\u8d44\u4ea7\u66f2\u7ebf\" , \"\u7b56\u7565\u6307\u6807\" ), ) fig . add_trace ( await self . _metrics_trace (), row = 1 , col = 2 ) if self . indicator is not None : indicator_on_hover = self . indicator [ \"value\" ] else : indicator_on_hover = None baseline_name = ( await Security . alias ( self . baseline_code ) if self . baseline_code else \"\u57fa\u51c6\" ) baseline_trace = go . Scatter ( y = baseline_prices , x = self . ticks , mode = \"lines\" , name = baseline_name , showlegend = True , text = indicator_on_hover , hovertemplate = \"
<br>\u51c0\u503c:% {y:.2f} \" + \"<br>
\u6307\u6807:% {text:.1f} \" , ) fig . add_trace ( baseline_trace , row = 1 , col = 1 ) nv_trace = go . Scatter ( y = self . nv , x = self . ticks , mode = \"lines\" , name = \"\u7b56\u7565\" , showlegend = True , hovertemplate = \"
\u51c0\u503c:% {y:.2f} \" , ) fig . add_trace ( nv_trace , row = 1 , col = 1 ) if self . indicator is not None : ind_trace = go . Scatter ( y = self . indicator [ \"value\" ], x = self . ticks , mode = \"lines\" , name = \"indicator\" , showlegend = True , visible = \"legendonly\" , ) fig . add_trace ( ind_trace , row = 1 , col = 1 , secondary_y = True ) for trace in await self . _trade_info_trace (): fig . add_trace ( trace , row = 1 , col = 1 ) fig . update_xaxes ( type = \"category\" , tickangle = 45 , nticks = len ( self . ticks ) // 5 ) fig . update_layout ( margin = dict ( l = 20 , r = 20 , t = 50 , b = 50 ), width = 1040 , height = 435 ) fig . update_layout ( hovermode = \"x unified\" , hoverlabel = dict ( bgcolor = \"rgba(255,255,255,0.8)\" ) ) fig . show () __init__ ( self , bills , metrics , baseline_code = '399300.XSHE' , indicator = None ) special \u00b6 Parameters: Name Type Description Default bills dict \u56de\u6d4b\u751f\u6210\u7684\u8d26\u5355\uff0c\u901a\u8fc7Strategy.bills\u83b7\u5f97 required metrics dict \u56de\u6d4b\u751f\u6210\u7684\u6307\u6807\uff0c\u901a\u8fc7strategy.metrics\u83b7\u5f97 required baseline_code str \u57fa\u51c6\u8bc1\u5238\u4ee3\u7801 '399300.XSHE' indicator Optional[pandas.core.frame.DataFrame] \u56de\u6d4b\u65f6\u4f7f\u7528\u7684\u6307\u6807\u3002\u5982\u679c\u5b58\u5728\uff0c\u5c06\u53e0\u52a0\u5230\u7b56\u7565\u56de\u6d4b\u56fe\u4e0a\u3002\u5b83\u5e94\u8be5\u662f\u4e00\u4e2a\u4ee5\u65e5\u671f\u4e3a\u7d22\u5f15\uff0c\u6307\u6807\u503c\u5217\u540d\u4e3a\"value\"\u7684pandas.DataFrame\u3002\u5982\u679c\u4e0d\u63d0\u4f9b\uff0c\u5c06\u4e0d\u4f1a\u7ed8\u5236\u6307\u6807\u56fe None Source code in omicron/plotting/metrics.py def __init__ ( self , bills : dict , metrics : dict , baseline_code : str = \"399300.XSHE\" , indicator : Optional [ pd . DataFrame ] = None , ): \"\"\" Args: bills: \u56de\u6d4b\u751f\u6210\u7684\u8d26\u5355\uff0c\u901a\u8fc7Strategy.bills\u83b7\u5f97 metrics: \u56de\u6d4b\u751f\u6210\u7684\u6307\u6807\uff0c\u901a\u8fc7strategy.metrics\u83b7\u5f97 baseline_code: \u57fa\u51c6\u8bc1\u5238\u4ee3\u7801 indicator: \u56de\u6d4b\u65f6\u4f7f\u7528\u7684\u6307\u6807\u3002\u5982\u679c\u5b58\u5728\uff0c\u5c06\u53e0\u52a0\u5230\u7b56\u7565\u56de\u6d4b\u56fe\u4e0a\u3002\u5b83\u5e94\u8be5\u662f\u4e00\u4e2a\u4ee5\u65e5\u671f\u4e3a\u7d22\u5f15\uff0c\u6307\u6807\u503c\u5217\u540d\u4e3a\"value\"\u7684pandas.DataFrame\u3002\u5982\u679c\u4e0d\u63d0\u4f9b\uff0c\u5c06\u4e0d\u4f1a\u7ed8\u5236\u6307\u6807\u56fe \"\"\" self . metrics = metrics self . trades = bills [ \"trades\" ] self . positions = bills [ \"positions\" ] self . start = arrow . get ( bills [ \"assets\" ][ 0 ][ 0 ]) . date () self . end = arrow . get ( bills [ \"assets\" ][ - 1 ][ 0 ]) . date () self . frames = [ tf . int2date ( f ) for f in tf . get_frames ( self . start , self . end , FrameType . DAY ) ] if indicator is not None : self . indicator = indicator . join ( pd . Series ( index = self . frames , name = \"frames\" , dtype = np . float64 ), how = \"right\" , ) else : self . indicator = None # \u8bb0\u5f55\u65e5\u671f\u5230\u4e0b\u6807\u7684\u53cd\u5411\u6620\u5c04 self . _frame2pos = { f : i for i , f in enumerate ( self . frames )} self . ticks = self . _format_tick ( self . frames ) # TODO: there's bug in backtesting, temporarily fix here df = pd . DataFrame ( self . frames , columns = [ \"frame\" ]) df [ \"assets\" ] = np . nan assets = pd . DataFrame ( bills [ \"assets\" ], columns = [ \"frame\" , \"assets\" ]) df [ \"assets\" ] = assets [ \"assets\" ] self . assets = df . 
fillna ( method = \"ffill\" )[ \"assets\" ] . to_numpy () self . nv = self . assets / self . assets [ 0 ] self . baseline_code = baseline_code or \"399300.XSHE\" plot ( self ) async \u00b6 \u7ed8\u5236\u8d44\u4ea7\u66f2\u7ebf\u53ca\u56de\u6d4b\u6307\u6807\u56fe Source code in omicron/plotting/metrics.py async def plot ( self ): \"\"\"\u7ed8\u5236\u8d44\u4ea7\u66f2\u7ebf\u53ca\u56de\u6d4b\u6307\u6807\u56fe\"\"\" n = len ( self . assets ) bars = await Stock . get_bars ( self . baseline_code , n , FrameType . DAY , self . end ) baseline_prices = self . _fill_missing_prices ( bars , self . frames ) baseline_prices /= baseline_prices [ 0 ] fig = make_subplots ( rows = 1 , cols = 2 , shared_xaxes = False , specs = [ [{ \"secondary_y\" : True }, { \"type\" : \"table\" }], ], column_width = [ 0.75 , 0.25 ], horizontal_spacing = 0.01 , subplot_titles = ( \"\u8d44\u4ea7\u66f2\u7ebf\" , \"\u7b56\u7565\u6307\u6807\" ), ) fig . add_trace ( await self . _metrics_trace (), row = 1 , col = 2 ) if self . indicator is not None : indicator_on_hover = self . indicator [ \"value\" ] else : indicator_on_hover = None baseline_name = ( await Security . alias ( self . baseline_code ) if self . baseline_code else \"\u57fa\u51c6\" ) baseline_trace = go . Scatter ( y = baseline_prices , x = self . ticks , mode = \"lines\" , name = baseline_name , showlegend = True , text = indicator_on_hover , hovertemplate = \"
<br>\u51c0\u503c:% {y:.2f} \" + \"<br>
\u6307\u6807:% {text:.1f} \" , ) fig . add_trace ( baseline_trace , row = 1 , col = 1 ) nv_trace = go . Scatter ( y = self . nv , x = self . ticks , mode = \"lines\" , name = \"\u7b56\u7565\" , showlegend = True , hovertemplate = \"
\u51c0\u503c:% {y:.2f} \" , ) fig . add_trace ( nv_trace , row = 1 , col = 1 ) if self . indicator is not None : ind_trace = go . Scatter ( y = self . indicator [ \"value\" ], x = self . ticks , mode = \"lines\" , name = \"indicator\" , showlegend = True , visible = \"legendonly\" , ) fig . add_trace ( ind_trace , row = 1 , col = 1 , secondary_y = True ) for trace in await self . _trade_info_trace (): fig . add_trace ( trace , row = 1 , col = 1 ) fig . update_xaxes ( type = \"category\" , tickangle = 45 , nticks = len ( self . ticks ) // 5 ) fig . update_layout ( margin = dict ( l = 20 , r = 20 , t = 50 , b = 50 ), width = 1040 , height = 435 ) fig . update_layout ( hovermode = \"x unified\" , hoverlabel = dict ( bgcolor = \"rgba(255,255,255,0.8)\" ) ) fig . show ()","title":"MetricsGraph"},{"location":"api/plotting/metrics/#omicron.plotting.metrics.MetricsGraph","text":"Source code in omicron/plotting/metrics.py class MetricsGraph : def __init__ ( self , bills : dict , metrics : dict , baseline_code : str = \"399300.XSHE\" , indicator : Optional [ pd . DataFrame ] = None , ): \"\"\" Args: bills: \u56de\u6d4b\u751f\u6210\u7684\u8d26\u5355\uff0c\u901a\u8fc7Strategy.bills\u83b7\u5f97 metrics: \u56de\u6d4b\u751f\u6210\u7684\u6307\u6807\uff0c\u901a\u8fc7strategy.metrics\u83b7\u5f97 baseline_code: \u57fa\u51c6\u8bc1\u5238\u4ee3\u7801 indicator: \u56de\u6d4b\u65f6\u4f7f\u7528\u7684\u6307\u6807\u3002\u5982\u679c\u5b58\u5728\uff0c\u5c06\u53e0\u52a0\u5230\u7b56\u7565\u56de\u6d4b\u56fe\u4e0a\u3002\u5b83\u5e94\u8be5\u662f\u4e00\u4e2a\u4ee5\u65e5\u671f\u4e3a\u7d22\u5f15\uff0c\u6307\u6807\u503c\u5217\u540d\u4e3a\"value\"\u7684pandas.DataFrame\u3002\u5982\u679c\u4e0d\u63d0\u4f9b\uff0c\u5c06\u4e0d\u4f1a\u7ed8\u5236\u6307\u6807\u56fe \"\"\" self . metrics = metrics self . trades = bills [ \"trades\" ] self . positions = bills [ \"positions\" ] self . start = arrow . get ( bills [ \"assets\" ][ 0 ][ 0 ]) . date () self . end = arrow . get ( bills [ \"assets\" ][ - 1 ][ 0 ]) . date () self . frames = [ tf . int2date ( f ) for f in tf . get_frames ( self . start , self . end , FrameType . DAY ) ] if indicator is not None : self . indicator = indicator . join ( pd . Series ( index = self . frames , name = \"frames\" , dtype = np . float64 ), how = \"right\" , ) else : self . indicator = None # \u8bb0\u5f55\u65e5\u671f\u5230\u4e0b\u6807\u7684\u53cd\u5411\u6620\u5c04 self . _frame2pos = { f : i for i , f in enumerate ( self . frames )} self . ticks = self . _format_tick ( self . frames ) # TODO: there's bug in backtesting, temporarily fix here df = pd . DataFrame ( self . frames , columns = [ \"frame\" ]) df [ \"assets\" ] = np . nan assets = pd . DataFrame ( bills [ \"assets\" ], columns = [ \"frame\" , \"assets\" ]) df [ \"assets\" ] = assets [ \"assets\" ] self . assets = df . fillna ( method = \"ffill\" )[ \"assets\" ] . to_numpy () self . nv = self . assets / self . assets [ 0 ] self . 
baseline_code = baseline_code or \"399300.XSHE\" def _fill_missing_prices ( self , bars : BarsArray , frames : Union [ List , NDArray ]): \"\"\"\u5c06bars\u4e2d\u7f3a\u5931\u503c\u91c7\u7528\u5176\u524d\u503c\u66ff\u6362 \u5f53baseline\u4e3a\u4e2a\u80a1\u65f6\uff0c\u53ef\u80fd\u5b58\u5728\u505c\u724c\u7684\u60c5\u51b5\uff0c\u8fd9\u6837\u5bfc\u81f4\u7531\u6b64\u8ba1\u7b97\u7684\u53c2\u8003\u6536\u76ca\u65e0\u6cd5\u4e0e\u56de\u6d4b\u7684\u8d44\u4ea7\u6536\u76ca\u5bf9\u9f50\uff0c\u56e0\u6b64\u9700\u8981\u8fdb\u884c\u8c03\u6574\u3002 \u51fa\u4e8e\u8fd9\u4e2a\u76ee\u7684\uff0c\u672c\u51fd\u6570\u53ea\u8fd4\u56de\u5904\u7406\u540e\u7684\u6536\u76d8\u4ef7\u3002 Args: bars: \u57fa\u7ebf\u884c\u60c5\u6570\u636e\u3002 frames: \u65e5\u671f\u7d22\u5f15 Returns: \u8865\u5145\u7f3a\u5931\u503c\u540e\u7684\u6536\u76d8\u4ef7\u5e8f\u5217 \"\"\" _close = pd . DataFrame ( { \"close\" : pd . Series ( bars [ \"close\" ], index = bars [ \"frame\" ]), \"frame\" : pd . Series ( np . empty (( len ( frames ),)), index = frames ), } )[ \"close\" ] . to_numpy () # \u8fd9\u91cc\u4f7f\u7528omicron\u4e2d\u7684fill_nan\uff0c\u662f\u56e0\u4e3a\u5982\u679c\u6570\u7ec4\u7684\u7b2c\u4e00\u4e2a\u5143\u7d20\u5373\u4e3aNaN\u7684\u8bdd\uff0c\u90a3\u4e48DataFrame.fillna(method='ffill')\u5c06\u65e0\u6cd5\u5904\u7406\u8fd9\u6837\u7684\u60c5\u51b5(\u4ecd\u7136\u4fdd\u6301\u4e3anan) return fill_nan ( _close ) def _format_tick ( self , frames : Union [ Frame , List [ Frame ]]) -> Union [ str , NDArray ]: if type ( frames ) == datetime . date : x = frames return f \" { x . year : 02 } - { x . month : 02 } - { x . day : 02 } \" elif type ( frames ) == datetime . datetime : x = frames return f \" { x . month : 02 } - { x . day : 02 } { x . hour : 02 } : { x . minute : 02 } \" elif type ( frames [ 0 ]) == datetime . date : # type: ignore return np . array ([ f \" { x . year : 02 } - { x . month : 02 } - { x . day : 02 } \" for x in frames ]) else : return np . array ( [ f \" { x . month : 02 } - { x . day : 02 } { x . hour : 02 } : { x . minute : 02 } \" for x in frames ] # type: ignore ) async def _metrics_trace ( self ): metric_names = { \"start\" : \"\u8d77\u59cb\u65e5\" , \"end\" : \"\u7ed3\u675f\u65e5\" , \"window\" : \"\u8d44\u4ea7\u66b4\u9732\u7a97\u53e3\" , \"total_tx\" : \"\u4ea4\u6613\u6b21\u6570\" , \"total_profit\" : \"\u603b\u5229\u6da6\" , \"total_profit_rate\" : \"\u5229\u6da6\u7387\" , \"win_rate\" : \"\u80dc\u7387\" , \"mean_return\" : \"\u65e5\u5747\u56de\u62a5\" , \"sharpe\" : \"\u590f\u666e\u7387\" , \"max_drawdown\" : \"\u6700\u5927\u56de\u64a4\" , \"annual_return\" : \"\u5e74\u5316\u56de\u62a5\" , \"volatility\" : \"\u6ce2\u52a8\u7387\" , \"sortino\" : \"sortino\" , \"calmar\" : \"calmar\" , } # bug: plotly go.Table.Cells format not work here metric_formatter = { \"start\" : \" {} \" , \"end\" : \" {} \" , \"window\" : \" {} \" , \"total_tx\" : \" {} \" , \"total_profit\" : \" {:.2f} \" , \"total_profit_rate\" : \" {:.2%} \" , \"win_rate\" : \" {:.2%} \" , \"mean_return\" : \" {:.2%} \" , \"sharpe\" : \" {:.2f} \" , \"max_drawdown\" : \" {:.2%} \" , \"annual_return\" : \" {:.2%} \" , \"volatility\" : \" {:.2%} \" , \"sortino\" : \" {:.2f} \" , \"calmar\" : \" {:.2f} \" , } metrics = deepcopy ( self . metrics ) baseline = metrics [ \"baseline\" ] or {} del metrics [ \"baseline\" ] baseline_name = ( await Security . alias ( self . baseline_code ) if self . baseline_code else \"\u57fa\u51c6\" ) metrics_formatted = [] for k in metric_names . keys (): if metrics . get ( k ): metrics_formatted . 
append ( metric_formatter [ k ] . format ( metrics . get ( k ))) else : metrics_formatted . append ( \"-\" ) baseline_formatted = [] for k in metric_names . keys (): if baseline . get ( k ): baseline_formatted . append ( metric_formatter [ k ] . format ( baseline . get ( k ))) else : baseline_formatted . append ( \"-\" ) return go . Table ( header = dict ( values = [ \"\u6307\u6807\u540d\" , \"\u7b56\u7565\" , baseline_name ]), cells = dict ( values = [ [ v for _ , v in metric_names . items ()], metrics_formatted , baseline_formatted , ], font_size = 10 , ), ) async def _trade_info_trace ( self ): \"\"\"\u6784\u5efahover text \u5e8f\u5217\"\"\" # convert trades into hover_info buys = defaultdict ( list ) sells = defaultdict ( list ) for _ , trade in self . trades . items (): trade_date = arrow . get ( trade [ \"time\" ]) . date () ipos = self . _frame2pos . get ( trade_date ) if ipos is None : logger . warning ( \"date %s in trade record not in backtest range\" , trade_date ) continue name = await Security . alias ( trade [ \"security\" ]) price = trade [ \"price\" ] side = trade [ \"order_side\" ] filled = trade [ \"filled\" ] trade_text = f \" { side } : { name } { filled / 100 : .0f } \u624b \u4ef7\u683c: { price : .02f } \u6210\u4ea4\u989d: { filled * price / 10000 : .1f } \u4e07\" if side == \"\u5356\u51fa\" : sells [ trade_date ] . append ( trade_text ) elif side in ( \"\u4e70\u5165\" , \"\u5206\u7ea2\u914d\u80a1\" ): buys [ trade_date ] . append ( trade_text ) X_buy , Y_buy , data_buy = [], [], [] X_sell , Y_sell , data_sell = [], [], [] for dt , text in buys . items (): ipos = self . _frame2pos . get ( dt ) Y_buy . append ( self . nv [ ipos ]) X_buy . append ( self . _format_tick ( dt )) asset = self . assets [ ipos ] hover = f \"\u8d44\u4ea7: { asset / 10000 : .1f } \u4e07
<br>{ '<br>
' . join ( text ) } \" data_buy . append ( hover ) trace_buy = go . Scatter ( x = X_buy , y = Y_buy , mode = \"markers\" , text = data_buy , name = \"\u4e70\u5165\u6210\u4ea4\" , marker = dict ( color = \"red\" , symbol = \"triangle-up\" ), hovertemplate = \"
% {text} \" , ) for dt , text in sells . items (): ipos = self . _frame2pos . get ( dt ) Y_sell . append ( self . nv [ ipos ]) X_sell . append ( self . _format_tick ( dt )) asset = self . assets [ ipos ] hover = f \"\u8d44\u4ea7: { asset / 10000 : .1f } \u4e07
<br>{ '<br>
' . join ( text ) } \" data_sell . append ( hover ) trace_sell = go . Scatter ( x = X_sell , y = Y_sell , mode = \"markers\" , text = data_sell , name = \"\u5356\u51fa\u6210\u4ea4\" , marker = dict ( color = \"green\" , symbol = \"triangle-down\" ), hovertemplate = \"
% {text} \" , ) return trace_buy , trace_sell async def plot ( self ): \"\"\"\u7ed8\u5236\u8d44\u4ea7\u66f2\u7ebf\u53ca\u56de\u6d4b\u6307\u6807\u56fe\"\"\" n = len ( self . assets ) bars = await Stock . get_bars ( self . baseline_code , n , FrameType . DAY , self . end ) baseline_prices = self . _fill_missing_prices ( bars , self . frames ) baseline_prices /= baseline_prices [ 0 ] fig = make_subplots ( rows = 1 , cols = 2 , shared_xaxes = False , specs = [ [{ \"secondary_y\" : True }, { \"type\" : \"table\" }], ], column_width = [ 0.75 , 0.25 ], horizontal_spacing = 0.01 , subplot_titles = ( \"\u8d44\u4ea7\u66f2\u7ebf\" , \"\u7b56\u7565\u6307\u6807\" ), ) fig . add_trace ( await self . _metrics_trace (), row = 1 , col = 2 ) if self . indicator is not None : indicator_on_hover = self . indicator [ \"value\" ] else : indicator_on_hover = None baseline_name = ( await Security . alias ( self . baseline_code ) if self . baseline_code else \"\u57fa\u51c6\" ) baseline_trace = go . Scatter ( y = baseline_prices , x = self . ticks , mode = \"lines\" , name = baseline_name , showlegend = True , text = indicator_on_hover , hovertemplate = \"
<br>\u51c0\u503c:% {y:.2f} \" + \"<br>
\u6307\u6807:% {text:.1f} \" , ) fig . add_trace ( baseline_trace , row = 1 , col = 1 ) nv_trace = go . Scatter ( y = self . nv , x = self . ticks , mode = \"lines\" , name = \"\u7b56\u7565\" , showlegend = True , hovertemplate = \"
\u51c0\u503c:% {y:.2f} \" , ) fig . add_trace ( nv_trace , row = 1 , col = 1 ) if self . indicator is not None : ind_trace = go . Scatter ( y = self . indicator [ \"value\" ], x = self . ticks , mode = \"lines\" , name = \"indicator\" , showlegend = True , visible = \"legendonly\" , ) fig . add_trace ( ind_trace , row = 1 , col = 1 , secondary_y = True ) for trace in await self . _trade_info_trace (): fig . add_trace ( trace , row = 1 , col = 1 ) fig . update_xaxes ( type = \"category\" , tickangle = 45 , nticks = len ( self . ticks ) // 5 ) fig . update_layout ( margin = dict ( l = 20 , r = 20 , t = 50 , b = 50 ), width = 1040 , height = 435 ) fig . update_layout ( hovermode = \"x unified\" , hoverlabel = dict ( bgcolor = \"rgba(255,255,255,0.8)\" ) ) fig . show ()","title":"MetricsGraph"},{"location":"api/plotting/metrics/#omicron.plotting.metrics.MetricsGraph.__init__","text":"Parameters: Name Type Description Default bills dict \u56de\u6d4b\u751f\u6210\u7684\u8d26\u5355\uff0c\u901a\u8fc7Strategy.bills\u83b7\u5f97 required metrics dict \u56de\u6d4b\u751f\u6210\u7684\u6307\u6807\uff0c\u901a\u8fc7strategy.metrics\u83b7\u5f97 required baseline_code str \u57fa\u51c6\u8bc1\u5238\u4ee3\u7801 '399300.XSHE' indicator Optional[pandas.core.frame.DataFrame] \u56de\u6d4b\u65f6\u4f7f\u7528\u7684\u6307\u6807\u3002\u5982\u679c\u5b58\u5728\uff0c\u5c06\u53e0\u52a0\u5230\u7b56\u7565\u56de\u6d4b\u56fe\u4e0a\u3002\u5b83\u5e94\u8be5\u662f\u4e00\u4e2a\u4ee5\u65e5\u671f\u4e3a\u7d22\u5f15\uff0c\u6307\u6807\u503c\u5217\u540d\u4e3a\"value\"\u7684pandas.DataFrame\u3002\u5982\u679c\u4e0d\u63d0\u4f9b\uff0c\u5c06\u4e0d\u4f1a\u7ed8\u5236\u6307\u6807\u56fe None Source code in omicron/plotting/metrics.py def __init__ ( self , bills : dict , metrics : dict , baseline_code : str = \"399300.XSHE\" , indicator : Optional [ pd . DataFrame ] = None , ): \"\"\" Args: bills: \u56de\u6d4b\u751f\u6210\u7684\u8d26\u5355\uff0c\u901a\u8fc7Strategy.bills\u83b7\u5f97 metrics: \u56de\u6d4b\u751f\u6210\u7684\u6307\u6807\uff0c\u901a\u8fc7strategy.metrics\u83b7\u5f97 baseline_code: \u57fa\u51c6\u8bc1\u5238\u4ee3\u7801 indicator: \u56de\u6d4b\u65f6\u4f7f\u7528\u7684\u6307\u6807\u3002\u5982\u679c\u5b58\u5728\uff0c\u5c06\u53e0\u52a0\u5230\u7b56\u7565\u56de\u6d4b\u56fe\u4e0a\u3002\u5b83\u5e94\u8be5\u662f\u4e00\u4e2a\u4ee5\u65e5\u671f\u4e3a\u7d22\u5f15\uff0c\u6307\u6807\u503c\u5217\u540d\u4e3a\"value\"\u7684pandas.DataFrame\u3002\u5982\u679c\u4e0d\u63d0\u4f9b\uff0c\u5c06\u4e0d\u4f1a\u7ed8\u5236\u6307\u6807\u56fe \"\"\" self . metrics = metrics self . trades = bills [ \"trades\" ] self . positions = bills [ \"positions\" ] self . start = arrow . get ( bills [ \"assets\" ][ 0 ][ 0 ]) . date () self . end = arrow . get ( bills [ \"assets\" ][ - 1 ][ 0 ]) . date () self . frames = [ tf . int2date ( f ) for f in tf . get_frames ( self . start , self . end , FrameType . DAY ) ] if indicator is not None : self . indicator = indicator . join ( pd . Series ( index = self . frames , name = \"frames\" , dtype = np . float64 ), how = \"right\" , ) else : self . indicator = None # \u8bb0\u5f55\u65e5\u671f\u5230\u4e0b\u6807\u7684\u53cd\u5411\u6620\u5c04 self . _frame2pos = { f : i for i , f in enumerate ( self . frames )} self . ticks = self . _format_tick ( self . frames ) # TODO: there's bug in backtesting, temporarily fix here df = pd . DataFrame ( self . frames , columns = [ \"frame\" ]) df [ \"assets\" ] = np . nan assets = pd . DataFrame ( bills [ \"assets\" ], columns = [ \"frame\" , \"assets\" ]) df [ \"assets\" ] = assets [ \"assets\" ] self . 
assets = df . fillna ( method = \"ffill\" )[ \"assets\" ] . to_numpy () self . nv = self . assets / self . assets [ 0 ] self . baseline_code = baseline_code or \"399300.XSHE\"","title":"__init__()"},{"location":"api/plotting/metrics/#omicron.plotting.metrics.MetricsGraph.plot","text":"\u7ed8\u5236\u8d44\u4ea7\u66f2\u7ebf\u53ca\u56de\u6d4b\u6307\u6807\u56fe Source code in omicron/plotting/metrics.py async def plot ( self ): \"\"\"\u7ed8\u5236\u8d44\u4ea7\u66f2\u7ebf\u53ca\u56de\u6d4b\u6307\u6807\u56fe\"\"\" n = len ( self . assets ) bars = await Stock . get_bars ( self . baseline_code , n , FrameType . DAY , self . end ) baseline_prices = self . _fill_missing_prices ( bars , self . frames ) baseline_prices /= baseline_prices [ 0 ] fig = make_subplots ( rows = 1 , cols = 2 , shared_xaxes = False , specs = [ [{ \"secondary_y\" : True }, { \"type\" : \"table\" }], ], column_width = [ 0.75 , 0.25 ], horizontal_spacing = 0.01 , subplot_titles = ( \"\u8d44\u4ea7\u66f2\u7ebf\" , \"\u7b56\u7565\u6307\u6807\" ), ) fig . add_trace ( await self . _metrics_trace (), row = 1 , col = 2 ) if self . indicator is not None : indicator_on_hover = self . indicator [ \"value\" ] else : indicator_on_hover = None baseline_name = ( await Security . alias ( self . baseline_code ) if self . baseline_code else \"\u57fa\u51c6\" ) baseline_trace = go . Scatter ( y = baseline_prices , x = self . ticks , mode = \"lines\" , name = baseline_name , showlegend = True , text = indicator_on_hover , hovertemplate = \"
<br>\u51c0\u503c:% {y:.2f} \" + \"<br>
\u6307\u6807:% {text:.1f} \" , ) fig . add_trace ( baseline_trace , row = 1 , col = 1 ) nv_trace = go . Scatter ( y = self . nv , x = self . ticks , mode = \"lines\" , name = \"\u7b56\u7565\" , showlegend = True , hovertemplate = \"
\u51c0\u503c:% {y:.2f} \" , ) fig . add_trace ( nv_trace , row = 1 , col = 1 ) if self . indicator is not None : ind_trace = go . Scatter ( y = self . indicator [ \"value\" ], x = self . ticks , mode = \"lines\" , name = \"indicator\" , showlegend = True , visible = \"legendonly\" , ) fig . add_trace ( ind_trace , row = 1 , col = 1 , secondary_y = True ) for trace in await self . _trade_info_trace (): fig . add_trace ( trace , row = 1 , col = 1 ) fig . update_xaxes ( type = \"category\" , tickangle = 45 , nticks = len ( self . ticks ) // 5 ) fig . update_layout ( margin = dict ( l = 20 , r = 20 , t = 50 , b = 50 ), width = 1040 , height = 435 ) fig . update_layout ( hovermode = \"x unified\" , hoverlabel = dict ( bgcolor = \"rgba(255,255,255,0.8)\" ) ) fig . show ()","title":"plot()"}]} \ No newline at end of file diff --git a/2.0.0.a76/sitemap.xml.gz b/2.0.0.a76/sitemap.xml.gz index d64db14628d8a6e397faaf8dd91a28fc1a7def12..fdb62937371503c8ec970e48ab216326ec706cc6 100644 GIT binary patch delta 14 Vcmcb_c!`lszMF$X@WMp4GXNp21gii5 delta 14 Vcmcb_c!`lszMF$XuW=&V82}*y1bF}e