diff --git a/.github/workflows/python.yml b/.github/workflows/python.yml index bbb44bca..32e1f0fc 100644 --- a/.github/workflows/python.yml +++ b/.github/workflows/python.yml @@ -45,7 +45,7 @@ jobs: strategy: fail-fast: false matrix: - python-version: ["3.8", "3.9", "3.10"] + python-version: ["3.8", "3.9", "3.10", "3.11"] name: Tests on Python ${{ matrix.python-version }} steps: - name: Set up python diff --git a/CHANGELOG.md b/CHANGELOG.md index 78501a44..68f1a2e6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,10 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [Unreleased] +### Added + +- Support for Python 3.11 ([#388](https://github.com/Substra/substra/pull/388)) + ## [0.48.1](https://github.com/Substra/substra/releases/tag/0.48.1) - 2023-10-06 ### Changed diff --git a/setup.py b/setup.py index d5246d1b..9a9dd2b4 100644 --- a/setup.py +++ b/setup.py @@ -34,6 +34,7 @@ "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", ], keywords=["cli", "substra"], packages=find_packages(exclude=["tests*"]), diff --git a/tests/data_factory.py b/tests/data_factory.py index eab6f296..32bdd767 100644 --- a/tests/data_factory.py +++ b/tests/data_factory.py @@ -56,11 +56,11 @@ def fake_data(self, n_samples=None): @tools.register def score(inputs, outputs, task_properties): - y_true = inputs['{InputIdentifiers.datasamples}'][1] - y_pred = _get_predictions(inputs['{InputIdentifiers.predictions}']) + y_true = inputs['{InputIdentifiers.datasamples.value}'][1] + y_pred = _get_predictions(inputs['{InputIdentifiers.predictions.value}']) res = sum(y_pred) - sum(y_true) print(f'metrics, y_true: {{y_true}}, y_pred: {{y_pred}}, result: {{res}}') - tools.save_performance(res, outputs['{OutputIdentifiers.performance}']) + tools.save_performance(res, outputs['{OutputIdentifiers.performance.value}']) def _get_predictions(path): with 
open(path) as f: @@ -77,9 +77,9 @@ def _get_predictions(path): @tools.register def train(inputs, outputs, task_properties): - X = inputs['{InputIdentifiers.datasamples}'][0] - y = inputs['{InputIdentifiers.datasamples}'][1] - models_path = inputs.get('{InputIdentifiers.shared}', []) + X = inputs['{InputIdentifiers.datasamples.value}'][0] + y = inputs['{InputIdentifiers.datasamples.value}'][1] + models_path = inputs.get('{InputIdentifiers.shared.value}', []) models = [_load_model(model_path) for model_path in models_path] print(f'Train, get X: {{X}}, y: {{y}}, models: {{models}}') @@ -95,17 +95,17 @@ def train(inputs, outputs, task_properties): res = dict(value=avg + err) print(f'Train, return {{res}}') - _save_model(res, outputs['{OutputIdentifiers.shared}']) + _save_model(res, outputs['{OutputIdentifiers.shared.value}']) @tools.register def predict(inputs, outputs, task_properties): - X = inputs['{InputIdentifiers.datasamples}'][0] - model = _load_model(inputs['{InputIdentifiers.shared}']) + X = inputs['{InputIdentifiers.datasamples.value}'][0] + model = _load_model(inputs['{InputIdentifiers.shared.value}']) res = [x * model['value'] for x in X] print(f'Predict, get X: {{X}}, model: {{model}}, return {{res}}') - _save_predictions(res, outputs['{OutputIdentifiers.predictions}']) + _save_predictions(res, outputs['{OutputIdentifiers.predictions.value}']) def _load_model(path): with open(path) as f: @@ -129,18 +129,18 @@ def _save_predictions(y_pred, path): @tools.register def aggregate(inputs, outputs, task_properties): - models_path = inputs.get('{InputIdentifiers.shared}', []) + models_path = inputs.get('{InputIdentifiers.shared.value}', []) models = [_load_model(model_path) for model_path in models_path] print(f'Aggregate models: {{models}}') values = [m['value'] for m in models] avg = sum(values) / len(values) res = dict(value=avg) print(f'Aggregate result: {{res}}') - _save_model(res, outputs['{OutputIdentifiers.shared}']) + _save_model(res, 
outputs['{OutputIdentifiers.shared.value}']) @tools.register def predict(inputs, outputs, task_properties): - _save_predictions(0, outputs['{OutputIdentifiers.predictions}']) + _save_predictions(0, outputs['{OutputIdentifiers.predictions.value}']) def _load_model(path): with open(path) as f: @@ -165,12 +165,12 @@ def _save_predictions(y_pred, path): @tools.register def train(inputs, outputs, task_properties): - X = inputs['{InputIdentifiers.datasamples}'][0] - y = inputs['{InputIdentifiers.datasamples}'][1] - head_model_path = inputs.get('{InputIdentifiers.local}') + X = inputs['{InputIdentifiers.datasamples.value}'][0] + y = inputs['{InputIdentifiers.datasamples.value}'][1] + head_model_path = inputs.get('{InputIdentifiers.local.value}') head_model = _load_model(head_model_path) if head_model_path else None - trunk_model_path = inputs.get('{InputIdentifiers.shared}') + trunk_model_path = inputs.get('{InputIdentifiers.shared.value}') trunk_model = _load_model(trunk_model_path) if trunk_model_path else None print(f'Composite function train X: {{X}}, y: {{y}}, head_model: {{head_model}}, trunk_model: {{trunk_model}}') @@ -192,21 +192,21 @@ def train(inputs, outputs, task_properties): res = dict(value= res_head + err_head), dict(value= res_trunk + err_trunk) print(f'Composite function train head, trunk result: {{res}}') - _save_model(res[0], outputs['{OutputIdentifiers.local}']) - _save_model(res[1], outputs['{OutputIdentifiers.shared}']) + _save_model(res[0], outputs['{OutputIdentifiers.local.value}']) + _save_model(res[1], outputs['{OutputIdentifiers.shared.value}']) @tools.register def predict(inputs, outputs, task_properties): - X = inputs['{InputIdentifiers.datasamples}'][0] - head_model = _load_model(inputs['{InputIdentifiers.local}']) - trunk_model = _load_model(inputs['{InputIdentifiers.shared}']) + X = inputs['{InputIdentifiers.datasamples.value}'][0] + head_model = _load_model(inputs['{InputIdentifiers.local.value}']) + trunk_model = 
_load_model(inputs['{InputIdentifiers.shared.value}']) print(f'Composite function predict X: {{X}}, head_model: {{head_model}}, trunk_model: {{trunk_model}}') ratio_sum = head_model['value'] + trunk_model['value'] res = [x * ratio_sum for x in X] print(f'Composite function predict result: {{res}}') - _save_predictions(res, outputs['{OutputIdentifiers.predictions}']) + _save_predictions(res, outputs['{OutputIdentifiers.predictions.value}']) def _load_model(path): with open(path) as f: