✨ Add ids option for needimport #1292

Merged · 2 commits · Sep 13, 2024
Changes from 1 commit
14 changes: 13 additions & 1 deletion docs/directives/needimport.rst
@@ -56,13 +56,25 @@ In most cases this should be the latest available version.
tags
~~~~

You can attach tags to existing tags of imported needs using the ``:tags:`` option.
You can attach tags to existing tags of imported needs using the ``:tags:`` option
(as a comma-separated list).
This may be useful to easily mark imported needs and to create specialised filters for them.
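
For example, a minimal sketch of the option in use (``needs.json`` and the tag names are illustrative placeholders, not taken from this PR)::

    .. needimport:: needs.json
       :tags: imported, external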

ids
~~~

.. versionadded:: 3.1.0

You can use the ``:ids:`` option to import only the needs with the given ids
(as a comma-separated list).
This is useful if you want to import only a subset of the needs from the JSON file.
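
For example, a minimal sketch (``needs.json`` and the need IDs are illustrative placeholders)::

    .. needimport:: needs.json
       :ids: REQ_001, REQ_002

Only the needs ``REQ_001`` and ``REQ_002`` would then be imported from the file.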

filter
~~~~~~

You can use the ``:filter:`` option to import only the needs that pass the filter criteria.
This is a string that is evaluated as a Python expression;
it is less performant than the ``:ids:`` option, but more flexible.

Please read :ref:`filter` for more information.
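
For example, a minimal sketch (``needs.json`` and the filter expression are illustrative placeholders)::

    .. needimport:: needs.json
       :filter: "sphinx" in tags and status == "open"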

39 changes: 24 additions & 15 deletions sphinx_needs/directives/needimport.py
@@ -19,7 +19,7 @@
from sphinx_needs.defaults import string_to_boolean
from sphinx_needs.filter_common import filter_single_need
from sphinx_needs.logging import log_warning
from sphinx_needs.needsfile import check_needs_file
from sphinx_needs.needsfile import SphinxNeedsFileException, check_needs_data
from sphinx_needs.utils import add_doc, import_prefix_link_edit, logger


@@ -37,6 +37,7 @@ class NeedimportDirective(SphinxDirective):
"version": directives.unchanged_required,
"hide": directives.flag,
"collapse": string_to_boolean,
"ids": directives.unchanged_required,
"filter": directives.unchanged_required,
"id_prefix": directives.unchanged_required,
"tags": directives.unchanged_required,
@@ -56,10 +57,6 @@ def run(self) -> Sequence[nodes.Node]:
filter_string = self.options.get("filter")
id_prefix = self.options.get("id_prefix", "")

tags = self.options.get("tags", [])
if len(tags) > 0:
tags = [tag.strip() for tag in re.split("[;,]", tags)]

need_import_path = self.arguments[0]

# check if the given argument is a downloadable needs.json path
@@ -115,21 +112,21 @@ def run(self) -> Sequence[nodes.Node]:
f"Could not load needs import file {correct_need_import_path}"
)

errors = check_needs_file(correct_need_import_path)
try:
with open(correct_need_import_path) as needs_file:
needs_import_list = json.load(needs_file)
except json.JSONDecodeError as e:
Member:
OSError should also be caught, but there is already a TODO

Member Author:
done

Member Author:
(knowing what exceptions to handle is always a pain, I was reading this with interest recently https://discuss.python.org/t/extend-type-hints-to-cover-exceptions/23788/51)

chrisjsewell marked this conversation as resolved.
# TODO: Add exception handling
raise SphinxNeedsFileException(correct_need_import_path) from e

errors = check_needs_data(needs_import_list)
if errors.schema:
logger.info(
f"Schema validation errors detected in file {correct_need_import_path}:"
)
for error in errors.schema:
logger.info(f' {error.message} -> {".".join(error.path)}')

try:
with open(correct_need_import_path) as needs_file:
needs_import_list = json.load(needs_file)
except json.JSONDecodeError as e:
# TODO: Add exception handling
raise e

if version is None:
try:
version = needs_import_list["current_version"]
@@ -146,6 +143,13 @@

needs_config = NeedsSphinxConfig(self.config)
data = needs_import_list["versions"][version]

if ids := self.options.get("ids"):
id_list = [i.strip() for i in ids.split(",") if i.strip()]
Member:
Can IDs contain a dynamic function, so you would have to use the 'split with dyn' function?

Member Author (@chrisjsewell, Sep 13, 2024):
oh no definitely not, no dynamic functions here; this is intended for fast filtering
data["needs"] = {
key: data["needs"][key] for key in id_list if key in data["needs"]
}

# TODO this is not exactly NeedsInfoType, because the export removes/adds some keys
needs_list: dict[str, NeedsInfoType] = data["needs"]
if schema := data.get("needs_schema"):
@@ -184,8 +188,13 @@ def run(self) -> Sequence[nodes.Node]:
needs_list = needs_list_filtered

# tags update
for need in needs_list.values():
need["tags"] = need["tags"] + tags
if tags := [
tag.strip()
for tag in re.split("[;,]", self.options.get("tags", ""))
Member:
same here, the new function should be used (or is that in a different open PR?)

Member Author:
this was already existing, but again no dynamic functions.
I feel you are confusing this directive with a need declaration directive, that is the only place we use dynamic functions in the directive options
if tag.strip()
]:
for need in needs_list.values():
need["tags"] = need["tags"] + tags

import_prefix_link_edit(needs_list, id_prefix, needs_config.extra_links)

30 changes: 24 additions & 6 deletions sphinx_needs/needsfile.py
@@ -11,6 +11,7 @@
import sys
from copy import deepcopy
from datetime import datetime
from functools import lru_cache
from typing import Any, Iterable

from jsonschema import Draft7Validator
@@ -242,21 +243,38 @@ def check_needs_file(path: str) -> Errors:
:param path: File path to a needs.json file
:return: Dict, with error reports
"""
schema_path = os.path.join(os.path.dirname(__file__), "needsfile.json")
with open(schema_path) as schema_file:
needs_schema = json.load(schema_file)

with open(path) as needs_file:
try:
needs_data = json.load(needs_file)
data = json.load(needs_file)
except json.JSONDecodeError as e:
raise SphinxNeedsFileException(
f'Problems loading json file "{path}". '
f"Maybe it is empty or has an invalid json format. Original exception: {e}"
)
return check_needs_data(data)


@lru_cache
def _load_schema() -> dict[str, Any]:
schema_path = os.path.join(os.path.dirname(__file__), "needsfile.json")
with open(schema_path) as schema_file:
return json.load(schema_file) # type: ignore[no-any-return]


def check_needs_data(data: Any) -> Errors:
"""
Checks the given needs data, if it passes our needs.json structure tests.

Current checks:
* Schema validation

:param data: Loaded needs.json data
:return: Errors object with error reports
"""
needs_schema = _load_schema()

validator = Draft7Validator(needs_schema)
schema_errors = list(validator.iter_errors(needs_data))
schema_errors = list(validator.iter_errors(data))

# In future there may be additional types of validations.
# So lets already use a class for all errors