diff --git a/.gitignore b/.gitignore
index 4681f8b57..2a0bf53ee 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,3 +1,4 @@
 __pycache__/
 *.egg-info/
-*.pyc
\ No newline at end of file
+*.pyc
+.idea/
diff --git a/multiagent/environment.py b/multiagent/environment.py
index d2e8d3278..86df92de0 100644
--- a/multiagent/environment.py
+++ b/multiagent/environment.py
@@ -210,7 +210,7 @@ def render(self, mode='human'):
                    else:
                        word = alphabet[np.argmax(other.state.c)]
                    message += (other.name + ' to ' + agent.name + ': ' + word + ' ')
-            print(message)
+            # print(message)
 
        for i in range(len(self.viewers)):
            # create viewers (if necessary)
@@ -231,7 +231,8 @@ def render(self, mode='human'):
                geom = rendering.make_circle(entity.size)
                xform = rendering.Transform()
                if 'agent' in entity.name:
-                    geom.set_color(*entity.color, alpha=0.5)
+                    color = (entity.color[0], entity.color[1], entity.color[2], 0.5)
+                    geom.set_color(*color)
                else:
                    geom.set_color(*entity.color)
                geom.add_attr(xform)
diff --git a/multiagent/multi_discrete.py b/multiagent/multi_discrete.py
index d7108ad43..041484729 100644
--- a/multiagent/multi_discrete.py
+++ b/multiagent/multi_discrete.py
@@ -4,7 +4,8 @@
 import numpy as np
 
 import gym
-from gym.spaces import prng
+from gym.utils import seeding
+
 
 class MultiDiscrete(gym.Space):
     """
@@ -27,10 +28,12 @@ def __init__(self, array_of_param_array):
        self.high = np.array([x[1] for x in array_of_param_array])
        self.num_discrete_space = self.low.shape[0]
 
+        self.random, _ = seeding.np_random()  # seeding.np_random() returns an (rng, seed) tuple
+
    def sample(self):
        """ Returns a array with one sample from each discrete action space """
        # For each row: round(random .* (max - min) + min, 0)
-        random_array = prng.np_random.rand(self.num_discrete_space)
+        random_array = self.random.rand(self.num_discrete_space)
        return [int(x) for x in np.floor(np.multiply((self.high - self.low + 1.), random_array) + self.low)]
    def contains(self, x):
        return len(x) == self.num_discrete_space and (np.array(x) >= self.low).all() and (np.array(x) <= self.high).all()
diff --git a/multiagent/policy.py b/multiagent/policy.py
index cf9ad0e1b..9c9cc783b 100644
--- a/multiagent/policy.py
+++ b/multiagent/policy.py
@@ -1,6 +1,8 @@
 import numpy as np
 from pyglet.window import key
 
+from multiagent.scenarios.simple import Scenario
+
 # individual agent policy
 class Policy(object):
     def __init__(self):
@@ -14,6 +16,7 @@ class InteractivePolicy(Policy):
    def __init__(self, env, agent_index):
        super(InteractivePolicy, self).__init__()
        self.env = env
+        #self.agent_index = agent_index
        # hard-coded keyboard events
        self.move = [False for i in range(4)]
        self.comm = [False for i in range(env.world.dim_c)]
diff --git a/multiagent/rendering.py b/multiagent/rendering.py
index cd00c7fb8..d72f1b98e 100644
--- a/multiagent/rendering.py
+++ b/multiagent/rendering.py
@@ -11,19 +11,30 @@
        os.environ['DYLD_FALLBACK_LIBRARY_PATH'] += ':/usr/lib'
        # (JDS 2016/04/15): avoid bug on Anaconda 2.3.0 / Yosemite
 
-from gym.utils import reraise
+#from gym.utils import reraise
 from gym import error
 
 try:
     import pyglet
 except ImportError as e:
-    reraise(suffix="HINT: you can install pyglet directly via 'pip install pyglet'. But if you really just want to install all Gym dependencies and not have to think about it, 'pip install -e .[all]' or 'pip install gym[all]' will do it.")
+    #reraise(suffix="HINT: you can install pyglet directly via 'pip install pyglet'. But if you really just want to install all Gym dependencies and not have to think about it, 'pip install -e .[all]' or 'pip install gym[all]' will do it.")
+    raise ImportError('''
+    Cannot import pyglet.
+    HINT: you can install pyglet directly via 'pip install pyglet'.
+    But if you really just want to install all Gym dependencies and not have to think about it,
+    'pip install -e .[all]' or 'pip install gym[all]' will do it.
+    ''')
 
 try:
     from pyglet.gl import *
 except ImportError as e:
-    reraise(prefix="Error occured while running `from pyglet.gl import *`",suffix="HINT: make sure you have OpenGL install. On Ubuntu, you can run 'apt-get install python-opengl'. If you're running on a server, you may need a virtual frame buffer; something like this should work: 'xvfb-run -s \"-screen 0 1400x900x24\" python <your_script.py>'")
-
+    #reraise(prefix="Error occured while running `from pyglet.gl import *`",suffix="HINT: make sure you have OpenGL install. On Ubuntu, you can run 'apt-get install python-opengl'. If you're running on a server, you may need a virtual frame buffer; something like this should work: 'xvfb-run -s \"-screen 0 1400x900x24\" python <your_script.py>'")
+    raise ImportError('''
+    Error occurred while running `from pyglet.gl import *`
+    HINT: make sure you have OpenGL installed. On Ubuntu, you can run 'apt-get install python-opengl'.
+    If you're running on a server, you may need a virtual frame buffer; something like this should work:
+    'xvfb-run -s \"-screen 0 1400x900x24\" python <your_script.py>'
+    ''')
 
 import math
 import numpy as np
@@ -342,4 +353,4 @@ def close(self):
        self.window.close()
        self.isopen = False
    def __del__(self):
-        self.close()
\ No newline at end of file
+        self.close()
diff --git a/multiagent/scenario.py b/multiagent/scenario.py
index 02d86773e..1718ea9bf 100644
--- a/multiagent/scenario.py
+++ b/multiagent/scenario.py
@@ -8,3 +8,7 @@ def make_world(self):
     # create initial conditions of the world
     def reset_world(self, world):
         raise NotImplementedError()
+    def reward(self, agent, world):
+        raise NotImplementedError()
+    def observation(self, agent, world):
+        raise NotImplementedError()
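Note on the scenario.py change: the base class now declares reward() and observation() alongside make_world() and reset_world(), so every scenario is expected to implement all four hooks. Below is a minimal sketch (not part of the patch) of a scenario that fills them in; it assumes the base class is named BaseScenario and that multiagent.core exposes World and Agent as in the upstream repo.

import numpy as np

from multiagent.core import World, Agent
from multiagent.scenario import BaseScenario


class MinimalScenario(BaseScenario):
    def make_world(self):
        # a single agent in an otherwise empty world
        world = World()
        world.agents = [Agent()]
        world.agents[0].name = 'agent 0'
        self.reset_world(world)
        return world

    def reset_world(self, world):
        # start at the origin with zero velocity and an empty communication state
        for agent in world.agents:
            agent.state.p_pos = np.zeros(world.dim_p)
            agent.state.p_vel = np.zeros(world.dim_p)
            agent.state.c = np.zeros(world.dim_c)

    def reward(self, agent, world):
        # example reward: penalize squared distance from the origin
        return -float(np.sum(np.square(agent.state.p_pos)))

    def observation(self, agent, world):
        # velocity followed by position, mirroring the bundled scenarios
        return np.concatenate([agent.state.p_vel, agent.state.p_pos])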