forked from PKU-Alignment/Safe-Policy-Optimization
-
Notifications
You must be signed in to change notification settings - Fork 0
/
setup.py
46 lines (40 loc) · 1.29 KB
/
setup.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
import os
from setuptools import find_packages, setup

# Single source of truth for the version: read it from safepo/version.txt
# (also shipped via package_data below). Explicit encoding avoids relying on
# the platform default.
with open(os.path.join("safepo", "version.txt"), "r", encoding="utf-8") as file_handler:
    __version__ = file_handler.read().strip()

# Placeholder long description; rendered as Markdown on PyPI
# (see long_description_content_type below).
long_description = """
todo
"""

setup(
    name="safepo",
    # Only ship the safepo package tree, not any stray top-level packages.
    packages=[package for package in find_packages() if package.startswith("safepo")],
    # py.typed marks the package as typed (PEP 561); version.txt is read above.
    package_data={"safepo": ["py.typed", "version.txt"]},
    install_requires=[
        "psutil",
        "joblib",
        "tensorboard",
        "pyyaml",
        "matplotlib",
        "tensorboardX",
        "gym==0.15.3",
        # we recommend using conda to install scipy and mpi4py
    ],
    description="Pytorch version of Safe Reinforcement Learning Algorithm",
    author="PKU-MARL",
    url="https://github.com/PKU-MARL/safepo-Baselines",
    author_email="jiamg.ji@gmail.com",
    # Adjacent string literals are concatenated by Python: the first literal
    # ends with a space so the two phrases do not run together, and the
    # original typos ("Mult", "Rinforcement") are fixed.
    keywords="Safe Single Agent Reinforcement Learning "
    "Safe Multi Agent Reinforcement Learning",
    license="MIT",
    long_description=long_description,
    long_description_content_type="text/markdown",
    version=__version__,
    python_requires=">=3.7",
    # PyPI package information.
    classifiers=[
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
    ],
)