Blob Blame History Raw
From db1a118ed0d36633c67513961b479f8fae3cc2b9 Mon Sep 17 00:00:00 2001
From: Ivan Devat <idevat@redhat.com>
Date: Thu, 15 Jun 2017 11:46:12 +0200
Subject: [PATCH] squash bz1447910 bundle resources are missing meta

d21dd0e6b4d3 make resource enable | disable work with bundles

27d46c115210 make resource manage | unmanage work with bundles

c963cdcd321b show bundles' meta attributes in resources listing

f1923af76d73 support meta attributes in 'resource bundle create'

e09015ee868a support meta attributes in 'resource bundle update'

c6e70a38346a stop bundles when deleting them
---
 pcs/cli/resource/parse_args.py                     |   4 +-
 pcs/cli/resource/test/test_parse_args.py           |  70 ++++++++
 pcs/lib/cib/nvpair.py                              |  12 +-
 pcs/lib/cib/resource/bundle.py                     |  17 +-
 pcs/lib/cib/resource/common.py                     |  40 +++--
 pcs/lib/cib/test/test_nvpair.py                    |  42 +++++
 pcs/lib/cib/test/test_resource_common.py           |  16 +-
 pcs/lib/cib/tools.py                               |  10 +-
 pcs/lib/commands/resource.py                       |  86 +++++++---
 pcs/lib/commands/test/resource/fixture.py          |   2 +-
 .../commands/test/resource/test_bundle_create.py   | 179 +++++++++++++++----
 .../commands/test/resource/test_bundle_update.py   | 102 ++++++++++-
 .../test/resource/test_resource_enable_disable.py  |  93 ++++++++--
 .../test/resource/test_resource_manage_unmanage.py | 189 +++++++++++++++++++--
 pcs/lib/pacemaker/state.py                         |  40 ++++-
 pcs/lib/pacemaker/test/test_state.py               | 108 +++++++++++-
 pcs/pcs.8                                          |   6 +-
 pcs/resource.py                                    |  99 ++++++++---
 pcs/test/cib_resource/test_bundle.py               |  67 ++++++++
 pcs/test/cib_resource/test_manage_unmanage.py      |   5 +-
 pcs/test/test_resource.py                          |  40 +++--
 pcs/usage.py                                       |   9 +-
 22 files changed, 1055 insertions(+), 181 deletions(-)

diff --git a/pcs/cli/resource/parse_args.py b/pcs/cli/resource/parse_args.py
index 19ee8f9..366acac 100644
--- a/pcs/cli/resource/parse_args.py
+++ b/pcs/cli/resource/parse_args.py
@@ -58,7 +58,7 @@ def parse_create(arg_list):
 
 def _parse_bundle_groups(arg_list):
     repeatable_keyword_list = ["port-map", "storage-map"]
-    keyword_list = ["container", "network"] + repeatable_keyword_list
+    keyword_list = ["meta", "container", "network"] + repeatable_keyword_list
     groups = group_by_keywords(
         arg_list,
         set(keyword_list),
@@ -99,6 +99,7 @@ def parse_bundle_create_options(arg_list):
             prepare_options(storage_map)
             for storage_map in groups.get("storage-map", [])
         ],
+        "meta": prepare_options(groups.get("meta", []))
     }
     if not parts["container_type"]:
         parts["container_type"] = "docker"
@@ -144,6 +145,7 @@ def parse_bundle_update_options(arg_list):
         "port_map_remove": port_map["remove"],
         "storage_map_add": storage_map["add"],
         "storage_map_remove": storage_map["remove"],
+        "meta": prepare_options(groups.get("meta", []))
     }
     return parts
 
diff --git a/pcs/cli/resource/test/test_parse_args.py b/pcs/cli/resource/test/test_parse_args.py
index 5033ec7..0c936cc 100644
--- a/pcs/cli/resource/test/test_parse_args.py
+++ b/pcs/cli/resource/test/test_parse_args.py
@@ -220,6 +220,7 @@ class ParseBundleCreateOptions(TestCase):
                 "network": {},
                 "port_map": [],
                 "storage_map": [],
+                "meta": {},
             }
         )
 
@@ -235,6 +236,7 @@ class ParseBundleCreateOptions(TestCase):
                 "network": {},
                 "port_map": [],
                 "storage_map": [],
+                "meta": {},
             }
         )
 
@@ -247,6 +249,7 @@ class ParseBundleCreateOptions(TestCase):
                 "network": {},
                 "port_map": [],
                 "storage_map": [],
+                "meta": {},
             }
         )
 
@@ -259,6 +262,7 @@ class ParseBundleCreateOptions(TestCase):
                 "network": {},
                 "port_map": [],
                 "storage_map": [],
+                "meta": {},
             }
         )
 
@@ -280,6 +284,7 @@ class ParseBundleCreateOptions(TestCase):
                 "network": {"a": "b", "c": "d"},
                 "port_map": [],
                 "storage_map": [],
+                "meta": {},
             }
         )
 
@@ -309,6 +314,7 @@ class ParseBundleCreateOptions(TestCase):
                 "network": {},
                 "port_map": [{"a": "b", "c": "d"}],
                 "storage_map": [],
+                "meta": {},
             }
         )
 
@@ -321,6 +327,7 @@ class ParseBundleCreateOptions(TestCase):
                 "network": {},
                 "port_map": [{"a": "b", "c": "d"}, {"e": "f"}],
                 "storage_map": [],
+                "meta": {},
             }
         )
 
@@ -347,6 +354,7 @@ class ParseBundleCreateOptions(TestCase):
                 "network": {},
                 "port_map": [],
                 "storage_map": [{"a": "b", "c": "d"}],
+                "meta": {},
             }
         )
 
@@ -359,6 +367,7 @@ class ParseBundleCreateOptions(TestCase):
                 "network": {},
                 "port_map": [],
                 "storage_map": [{"a": "b", "c": "d"}, {"e": "f"}],
+                "meta": {},
             }
         )
 
@@ -368,6 +377,28 @@ class ParseBundleCreateOptions(TestCase):
     def test_storage_map_missing_key(self):
         self.assert_raises_cmdline(["storage-map", "=b", "c=d"])
 
+    def test_meta(self):
+        self.assert_produce(
+            ["meta", "a=b", "c=d"],
+            {
+                "container_type": "docker",
+                "container": {},
+                "network": {},
+                "port_map": [],
+                "storage_map": [],
+                "meta": {"a": "b", "c": "d"},
+            }
+        )
+
+    def test_meta_empty(self):
+        self.assert_raises_cmdline(["meta"])
+
+    def test_meta_missing_value(self):
+        self.assert_raises_cmdline(["meta", "a", "c=d"])
+
+    def test_meta_missing_key(self):
+        self.assert_raises_cmdline(["meta", "=b", "c=d"])
+
     def test_all(self):
         self.assert_produce(
             [
@@ -377,6 +408,7 @@ class ParseBundleCreateOptions(TestCase):
                 "port-map", "m=n", "o=p",
                 "storage-map", "q=r", "s=t",
                 "storage-map", "u=v", "w=x",
+                "meta", "y=z", "A=B",
             ],
             {
                 "container_type": "lxc",
@@ -384,6 +416,7 @@ class ParseBundleCreateOptions(TestCase):
                 "network": {"e": "f", "g": "h"},
                 "port_map": [{"i": "j", "k": "l"}, {"m": "n", "o": "p"}],
                 "storage_map": [{"q": "r", "s": "t"}, {"u": "v", "w": "x"}],
+                "meta": {"y": "z", "A": "B"},
             }
         )
 
@@ -391,11 +424,13 @@ class ParseBundleCreateOptions(TestCase):
         self.assert_produce(
             [
                 "storage-map", "q=r", "s=t",
+                "meta", "y=z",
                 "port-map", "i=j", "k=l",
                 "network", "e=f",
                 "container", "lxc", "a=b",
                 "storage-map", "u=v", "w=x",
                 "port-map", "m=n", "o=p",
+                "meta", "A=B",
                 "network", "g=h",
                 "container", "c=d",
             ],
@@ -405,6 +440,7 @@ class ParseBundleCreateOptions(TestCase):
                 "network": {"e": "f", "g": "h"},
                 "port_map": [{"i": "j", "k": "l"}, {"m": "n", "o": "p"}],
                 "storage_map": [{"q": "r", "s": "t"}, {"u": "v", "w": "x"}],
+                "meta": {"y": "z", "A": "B"},
             }
         )
 
@@ -432,6 +468,7 @@ class ParseBundleUpdateOptions(TestCase):
                 "port_map_remove": [],
                 "storage_map_add": [],
                 "storage_map_remove": [],
+                "meta": {},
             }
         )
 
@@ -445,6 +482,7 @@ class ParseBundleUpdateOptions(TestCase):
                 "port_map_remove": [],
                 "storage_map_add": [],
                 "storage_map_remove": [],
+                "meta": {},
             }
         )
 
@@ -467,6 +505,7 @@ class ParseBundleUpdateOptions(TestCase):
                 "port_map_remove": [],
                 "storage_map_add": [],
                 "storage_map_remove": [],
+                "meta": {},
             }
         )
 
@@ -519,6 +558,7 @@ class ParseBundleUpdateOptions(TestCase):
                 "port_map_remove": ["c", "d", "i"],
                 "storage_map_add": [],
                 "storage_map_remove": [],
+                "meta": {},
             }
         )
 
@@ -562,9 +602,34 @@ class ParseBundleUpdateOptions(TestCase):
                     {"e": "f", "g": "h",},
                 ],
                 "storage_map_remove": ["c", "d", "i"],
+                "meta": {},
+            }
+        )
+
+    def test_meta(self):
+        self.assert_produce(
+            ["meta", "a=b", "c=d"],
+            {
+                "container": {},
+                "network": {},
+                "port_map_add": [],
+                "port_map_remove": [],
+                "storage_map_add": [],
+                "storage_map_remove": [],
+                "meta": {"a": "b", "c": "d"},
             }
         )
 
+    def test_meta_empty(self):
+        self.assert_raises_cmdline(["meta"])
+
+    def test_meta_missing_value(self):
+        self.assert_raises_cmdline(["meta", "a", "c=d"])
+
+    def test_meta_missing_key(self):
+        self.assert_raises_cmdline(["meta", "=b", "c=d"])
+
+
     def test_all(self):
         self.assert_produce(
             [
@@ -578,6 +643,7 @@ class ParseBundleUpdateOptions(TestCase):
                 "storage-map", "add", "v=w",
                 "storage-map", "remove", "x", "y",
                 "storage-map", "remove", "z",
+                "meta", "A=B", "C=D",
             ],
             {
                 "container": {"a": "b", "c": "d"},
@@ -592,6 +658,7 @@ class ParseBundleUpdateOptions(TestCase):
                     {"v": "w"},
                 ],
                 "storage_map_remove": ["x", "y", "z"],
+                "meta": {"A": "B", "C": "D"},
             }
         )
 
@@ -599,11 +666,13 @@ class ParseBundleUpdateOptions(TestCase):
         self.assert_produce(
             [
                 "storage-map", "remove", "x", "y",
+                "meta", "A=B",
                 "port-map", "remove", "o", "p",
                 "network", "e=f", "g=h",
                 "storage-map", "add", "r=s", "t=u",
                 "port-map", "add", "i=j", "k=l",
                 "container", "a=b", "c=d",
+                "meta", "C=D",
                 "port-map", "remove", "q",
                 "storage-map", "remove", "z",
                 "storage-map", "add", "v=w",
@@ -622,6 +691,7 @@ class ParseBundleUpdateOptions(TestCase):
                     {"v": "w"},
                 ],
                 "storage_map_remove": ["x", "y", "z"],
+                "meta": {"A": "B", "C": "D"},
             }
         )
 
diff --git a/pcs/lib/cib/nvpair.py b/pcs/lib/cib/nvpair.py
index 261d17c..d3f5a5c 100644
--- a/pcs/lib/cib/nvpair.py
+++ b/pcs/lib/cib/nvpair.py
@@ -11,18 +11,19 @@ from functools import partial
 from pcs.lib.cib.tools import create_subelement_id
 from pcs.lib.xml_tools import get_sub_element
 
-def _append_new_nvpair(nvset_element, name, value):
+def _append_new_nvpair(nvset_element, name, value, id_provider=None):
     """
     Create nvpair with name and value as subelement of nvset_element.
 
     etree.Element nvset_element is context of new nvpair
     string name is name attribute of new nvpair
     string value is value attribute of new nvpair
+    IdProvider id_provider -- elements' ids generator
     """
     etree.SubElement(
         nvset_element,
         "nvpair",
-        id=create_subelement_id(nvset_element, name),
+        id=create_subelement_id(nvset_element, name, id_provider),
         name=name,
         value=value
     )
@@ -73,7 +74,7 @@ def arrange_first_nvset(tag_name, context_element, nvpair_dict):
 
     update_nvset(nvset_element, nvpair_dict)
 
-def append_new_nvset(tag_name, context_element, nvpair_dict):
+def append_new_nvset(tag_name, context_element, nvpair_dict, id_provider=None):
     """
     Append new nvset_element comprising nvpairs children (corresponding
     nvpair_dict) to the context_element
@@ -81,12 +82,13 @@ def append_new_nvset(tag_name, context_element, nvpair_dict):
     string tag_name should be "instance_attributes" or "meta_attributes"
     etree.Element context_element is element where new nvset will be appended
     dict nvpair_dict contains source for nvpair children
+    IdProvider id_provider -- elements' ids generator
     """
     nvset_element = etree.SubElement(context_element, tag_name, {
-        "id": create_subelement_id(context_element, tag_name)
+        "id": create_subelement_id(context_element, tag_name, id_provider)
     })
     for name, value in sorted(nvpair_dict.items()):
-        _append_new_nvpair(nvset_element, name, value)
+        _append_new_nvpair(nvset_element, name, value, id_provider)
 
 append_new_instance_attributes = partial(
     append_new_nvset,
diff --git a/pcs/lib/cib/resource/bundle.py b/pcs/lib/cib/resource/bundle.py
index 0fe16f3..8a49c28 100644
--- a/pcs/lib/cib/resource/bundle.py
+++ b/pcs/lib/cib/resource/bundle.py
@@ -9,6 +9,10 @@ from lxml import etree
 
 from pcs.common import report_codes
 from pcs.lib import reports, validate
+from pcs.lib.cib.nvpair import (
+    append_new_meta_attributes,
+    arrange_first_meta_attributes,
+)
 from pcs.lib.cib.resource.primitive import TAG as TAG_PRIMITIVE
 from pcs.lib.cib.tools import find_element_by_tag_and_id
 from pcs.lib.errors import (
@@ -96,7 +100,7 @@ def validate_new(
 
 def append_new(
     parent_element, id_provider, bundle_id, container_type, container_options,
-    network_options, port_map, storage_map
+    network_options, port_map, storage_map, meta_attributes
 ):
     """
     Create new bundle and add it to the CIB
@@ -109,6 +113,7 @@ def append_new(
     dict network_options -- network options
     list of dict port_map -- list of port mapping options
     list of dict storage_map -- list of storage mapping options
+    dict meta_attributes -- meta attributes
     """
     bundle_element = etree.SubElement(parent_element, TAG, {"id": bundle_id})
     # TODO create the proper element once more container_types are supported
@@ -132,6 +137,8 @@ def append_new(
         _append_storage_map(
             storage_element, id_provider, bundle_id, storage_map_options
         )
+    if meta_attributes:
+        append_new_meta_attributes(bundle_element, meta_attributes, id_provider)
     return bundle_element
 
 def validate_update(
@@ -203,7 +210,8 @@ def validate_update(
 
 def update(
     id_provider, bundle_el, container_options, network_options,
-    port_map_add, port_map_remove, storage_map_add, storage_map_remove
+    port_map_add, port_map_remove, storage_map_add, storage_map_remove,
+    meta_attributes
 ):
     """
     Modify an existing bundle (does not touch encapsulated resources)
@@ -216,6 +224,7 @@ def update(
     list of string port_map_remove -- list of port mapping ids to remove
     list of dict storage_map_add -- list of storage mapping options to add
     list of string storage_map_remove -- list of storage mapping ids to remove
+    dict meta_attributes -- meta attributes to update
     """
     bundle_id = bundle_el.get("id")
     update_attributes_remove_empty(
@@ -253,7 +262,11 @@ def update(
             storage_element, id_provider, bundle_id, storage_map_options
         )
 
+    if meta_attributes:
+        arrange_first_meta_attributes(bundle_el, meta_attributes)
+
     # remove empty elements with no attributes
+    # meta attributes are handled in their own function
     for element in (network_element, storage_element):
         if len(element) < 1 and not element.attrib:
             element.getparent().remove(element)
diff --git a/pcs/lib/cib/resource/common.py b/pcs/lib/cib/resource/common.py
index f9028ff..0e52b4c 100644
--- a/pcs/lib/cib/resource/common.py
+++ b/pcs/lib/cib/resource/common.py
@@ -58,16 +58,18 @@ def find_resources_to_enable(resource_el):
     etree resource_el -- resource element
     """
     if is_bundle(resource_el):
-        # bundles currently cannot be disabled - pcmk does not support that
-        # inner resources are supposed to be managed separately
-        return []
+        to_enable = [resource_el]
+        in_bundle = get_bundle_inner_resource(resource_el)
+        if in_bundle is not None:
+            to_enable.append(in_bundle)
+        return to_enable
 
     if is_any_clone(resource_el):
         return [resource_el, get_clone_inner_resource(resource_el)]
 
     to_enable = [resource_el]
     parent = resource_el.getparent()
-    if is_any_clone(parent):
+    if is_any_clone(parent) or is_bundle(parent):
         to_enable.append(parent)
     return to_enable
 
@@ -109,20 +111,25 @@ def find_resources_to_manage(resource_el):
     # put there manually. If we didn't do it, the resource may stay unmanaged,
     # as a managed primitive in an unmanaged clone / group is still unmanaged
     # and vice versa.
-    # Bundle resources cannot be set as unmanaged - pcmk currently doesn't
-    # support that. Resources in a bundle are supposed to be treated separately.
-    if is_bundle(resource_el):
-        return []
     res_id = resource_el.attrib["id"]
     return (
         [resource_el] # the resource itself
         +
         # its parents
         find_parent(resource_el, "resources").xpath(
+            # a master or a clone which contains a group, a primitive, or a
+            # grouped primitive with the specified id
+            # OR
+            # a group (in a clone, master, etc. - hence //) which contains a
+            # primitive with the specified id
+            # OR
+            # a bundle which contains a primitive with the specified id
             """
                 (./master|./clone)[(group|group/primitive|primitive)[@id='{r}']]
                 |
                 //group[primitive[@id='{r}']]
+                |
+                ./bundle[primitive[@id='{r}']]
             """
             .format(r=res_id)
         )
@@ -164,10 +171,19 @@ def find_resources_to_unmanage(resource_el):
     #   See clone notes above
     #
     # a bundled primitive - the primitive - the primitive
-    # a bundled primitive - the bundle - nothing
-    #  bundles currently cannot be set as unmanaged - pcmk does not support that
-    # an empty bundle - the bundle - nothing
-    #  bundles currently cannot be set as unmanaged - pcmk does not support that
+    # a bundled primitive - the bundle - the bundle and the primitive
+    #  We need to unmanage implicit resources create by pacemaker and there is
+    #  no other way to do it than unmanage the bundle itself.
+    #  Since it is not possible to unbundle a resource, the concers described
+    #  at unclone don't apply here. However to prevent future bugs, in case
+    #  unbundling becomes possible, we unmanage the primitive as well.
+    # an empty bundle - the bundle - the bundle
+    #  There is nothing else to unmanage.
+    if is_bundle(resource_el):
+        in_bundle = get_bundle_inner_resource(resource_el)
+        return (
+            [resource_el, in_bundle] if in_bundle is not None else [resource_el]
+        )
     if is_any_clone(resource_el):
         resource_el = get_clone_inner_resource(resource_el)
     if is_group(resource_el):
diff --git a/pcs/lib/cib/test/test_nvpair.py b/pcs/lib/cib/test/test_nvpair.py
index 9b9d9b9..0f6d8f8 100644
--- a/pcs/lib/cib/test/test_nvpair.py
+++ b/pcs/lib/cib/test/test_nvpair.py
@@ -8,6 +8,7 @@ from __future__ import (
 from lxml import etree
 
 from pcs.lib.cib import nvpair
+from pcs.lib.cib.tools import IdProvider
 from pcs.test.tools.assertions import assert_xml_equal
 from pcs.test.tools.pcs_unittest import TestCase, mock
 from pcs.test.tools.xml import etree_to_str
@@ -25,6 +26,21 @@ class AppendNewNvpair(TestCase):
             """
         )
 
+    def test_with_id_provider(self):
+        nvset_element = etree.fromstring('<nvset id="a"/>')
+        provider = IdProvider(nvset_element)
+        provider.book_ids("a-b")
+        nvpair._append_new_nvpair(nvset_element, "b", "c", provider)
+        assert_xml_equal(
+            etree_to_str(nvset_element),
+            """
+            <nvset id="a">
+                <nvpair id="a-b-1" name="b" value="c"></nvpair>
+            </nvset>
+            """
+        )
+
+
 class UpdateNvsetTest(TestCase):
     @mock.patch(
         "pcs.lib.cib.nvpair.create_subelement_id",
@@ -167,6 +183,32 @@ class AppendNewNvsetTest(TestCase):
             etree_to_str(context_element)
         )
 
+    def test_with_id_provider(self):
+        context_element = etree.fromstring('<context id="a"/>')
+        provider = IdProvider(context_element)
+        provider.book_ids("a-instance_attributes", "a-instance_attributes-1-a")
+        nvpair.append_new_nvset(
+            "instance_attributes",
+            context_element,
+            {
+                "a": "b",
+                "c": "d",
+            },
+            provider
+        )
+        assert_xml_equal(
+            """
+                <context id="a">
+                    <instance_attributes id="a-instance_attributes-1">
+                        <nvpair id="a-instance_attributes-1-a-1" name="a" value="b"/>
+                        <nvpair id="a-instance_attributes-1-c" name="c" value="d"/>
+                    </instance_attributes>
+                </context>
+            """,
+            etree_to_str(context_element)
+        )
+
+
 class ArrangeFirstNvsetTest(TestCase):
     def setUp(self):
         self.root = etree.Element("root", id="root")
diff --git a/pcs/lib/cib/test/test_resource_common.py b/pcs/lib/cib/test/test_resource_common.py
index 52c2329..6b485f7 100644
--- a/pcs/lib/cib/test/test_resource_common.py
+++ b/pcs/lib/cib/test/test_resource_common.py
@@ -180,7 +180,7 @@ class FindResourcesToEnable(TestCase):
         self.assert_find_resources("F2", ["F2"])
 
     def test_primitive_in_bundle(self):
-        self.assert_find_resources("H", ["H"])
+        self.assert_find_resources("H", ["H", "H-bundle"])
 
     def test_group(self):
         self.assert_find_resources("D", ["D"])
@@ -204,10 +204,10 @@ class FindResourcesToEnable(TestCase):
         self.assert_find_resources("F-master", ["F-master", "F"])
 
     def test_bundle_empty(self):
-        self.assert_find_resources("G-bundle", [])
+        self.assert_find_resources("G-bundle", ["G-bundle"])
 
     def test_bundle_with_primitive(self):
-        self.assert_find_resources("H-bundle", [])
+        self.assert_find_resources("H-bundle", ["H-bundle", "H"])
 
 
 class Enable(TestCase):
@@ -360,7 +360,7 @@ class FindResourcesToManage(TestCase):
         self.assert_find_resources("F2", ["F2", "F-master", "F"])
 
     def test_primitive_in_bundle(self):
-        self.assert_find_resources("H", ["H"])
+        self.assert_find_resources("H", ["H", "H-bundle"])
 
     def test_group(self):
         self.assert_find_resources("D", ["D", "D1", "D2"])
@@ -384,10 +384,10 @@ class FindResourcesToManage(TestCase):
         self.assert_find_resources("F-master", ["F-master", "F", "F1", "F2"])
 
     def test_bundle_empty(self):
-        self.assert_find_resources("G-bundle", [])
+        self.assert_find_resources("G-bundle", ["G-bundle"])
 
     def test_bundle_with_primitive(self):
-        self.assert_find_resources("H-bundle", [])
+        self.assert_find_resources("H-bundle", ["H-bundle", "H"])
 
 
 class FindResourcesToUnmanage(TestCase):
@@ -447,10 +447,10 @@ class FindResourcesToUnmanage(TestCase):
         self.assert_find_resources("F-master", ["F1", "F2"])
 
     def test_bundle_empty(self):
-        self.assert_find_resources("G-bundle", [])
+        self.assert_find_resources("G-bundle", ["G-bundle"])
 
     def test_bundle_with_primitive(self):
-        self.assert_find_resources("H-bundle", [])
+        self.assert_find_resources("H-bundle", ["H-bundle", "H"])
 
 
 class Manage(TestCase):
diff --git a/pcs/lib/cib/tools.py b/pcs/lib/cib/tools.py
index 2308a42..cf91125 100644
--- a/pcs/lib/cib/tools.py
+++ b/pcs/lib/cib/tools.py
@@ -177,11 +177,11 @@ def find_element_by_tag_and_id(
         )
     )
 
-def create_subelement_id(context_element, suffix):
-    return find_unique_id(
-        context_element,
-        "{0}-{1}".format(context_element.get("id"), suffix)
-    )
+def create_subelement_id(context_element, suffix, id_provider=None):
+    proposed_id = "{0}-{1}".format(context_element.get("id"), suffix)
+    if id_provider:
+        return id_provider.allocate_id(proposed_id)
+    return find_unique_id(context_element, proposed_id)
 
 def check_new_id_applicable(tree, description, id):
     validate_id(id, description)
diff --git a/pcs/lib/commands/resource.py b/pcs/lib/commands/resource.py
index 3a060b8..0c5f682 100644
--- a/pcs/lib/commands/resource.py
+++ b/pcs/lib/commands/resource.py
@@ -22,6 +22,7 @@ from pcs.lib.errors import LibraryError
 from pcs.lib.pacemaker.values import validate_id
 from pcs.lib.pacemaker.state import (
     ensure_resource_state,
+    info_resource_state,
     is_resource_managed,
     ResourceNotFound,
 )
@@ -31,7 +32,10 @@ from pcs.lib.resource_agent import(
 
 @contextmanager
 def resource_environment(
-    env, wait=False, wait_for_resource_ids=None, disabled_after_wait=False,
+    env,
+    wait=False,
+    wait_for_resource_ids=None,
+    resource_state_reporter=info_resource_state,
     required_cib_version=None
 ):
     env.ensure_wait_satisfiable(wait)
@@ -41,10 +45,19 @@ def resource_environment(
     if wait is not False and wait_for_resource_ids:
         state = env.get_cluster_state()
         env.report_processor.process_list([
-            ensure_resource_state(not disabled_after_wait, state, res_id)
+            resource_state_reporter(state, res_id)
             for res_id in wait_for_resource_ids
         ])
 
+def _ensure_disabled_after_wait(disabled_after_wait):
+    def inner(state, resource_id):
+        return ensure_resource_state(
+            not disabled_after_wait,
+            state,
+            resource_id
+        )
+    return inner
+
 def _validate_remote_connection(
     resource_agent, nodes_to_validate_against, resource_id, instance_attributes,
     allow_not_suitable_command
@@ -195,7 +208,11 @@ def create(
         env,
         wait,
         [resource_id],
-        ensure_disabled or resource.common.are_meta_disabled(meta_attributes),
+        _ensure_disabled_after_wait(
+            ensure_disabled
+            or
+            resource.common.are_meta_disabled(meta_attributes)
+        )
     ) as resources_section:
         _check_special_cases(
             env,
@@ -269,7 +286,7 @@ def _create_as_clone_common(
         env,
         wait,
         [resource_id],
-        (
+        _ensure_disabled_after_wait(
             ensure_disabled
             or
             resource.common.are_meta_disabled(meta_attributes)
@@ -353,7 +370,11 @@ def create_in_group(
         env,
         wait,
         [resource_id],
-        ensure_disabled or resource.common.are_meta_disabled(meta_attributes),
+        _ensure_disabled_after_wait(
+            ensure_disabled
+            or
+            resource.common.are_meta_disabled(meta_attributes)
+        )
     ) as resources_section:
         _check_special_cases(
             env,
@@ -433,7 +454,11 @@ def create_into_bundle(
         env,
         wait,
         [resource_id],
-        disabled_after_wait=ensure_disabled,
+        _ensure_disabled_after_wait(
+            ensure_disabled
+            or
+            resource.common.are_meta_disabled(meta_attributes)
+        ),
         required_cib_version=(2, 8, 0)
     ) as resources_section:
         _check_special_cases(
@@ -465,8 +490,9 @@ def create_into_bundle(
 
 def bundle_create(
     env, bundle_id, container_type, container_options=None,
-    network_options=None, port_map=None, storage_map=None,
+    network_options=None, port_map=None, storage_map=None, meta_attributes=None,
     force_options=False,
+    ensure_disabled=False,
     wait=False,
 ):
     """
@@ -477,24 +503,32 @@ def bundle_create(
     string container_type -- container engine name (docker, lxc...)
     dict container_options -- container options
     dict network_options -- network options
-    list of dict port_map -- list of port mapping options
-    list of dict storage_map -- list of storage mapping options
+    list of dict port_map -- a list of port mapping options
+    list of dict storage_map -- a list of storage mapping options
+    dict meta_attributes -- bundle's meta attributes
     bool force_options -- return warnings instead of forceable errors
+    bool ensure_disabled -- set the bundle's target-role to "Stopped"
     mixed wait -- False: no wait, None: wait default timeout, int: wait timeout
     """
     container_options = container_options or {}
     network_options = network_options or {}
     port_map = port_map or []
     storage_map = storage_map or []
+    meta_attributes = meta_attributes or {}
 
     with resource_environment(
         env,
         wait,
         [bundle_id],
-        # bundles are always enabled, currently there is no way to disable them
-        disabled_after_wait=False,
+        _ensure_disabled_after_wait(
+            ensure_disabled
+            or
+            resource.common.are_meta_disabled(meta_attributes)
+        ),
         required_cib_version=(2, 8, 0)
     ) as resources_section:
+        # no need to run validations related to remote and guest nodes as those
+        # nodes can only be created from primitive resources
         id_provider = IdProvider(resources_section)
         env.report_processor.process_list(
             resource.bundle.validate_new(
@@ -505,10 +539,11 @@ def bundle_create(
                 network_options,
                 port_map,
                 storage_map,
+                # TODO meta attributes - there is no validation for now
                 force_options
             )
         )
-        resource.bundle.append_new(
+        bundle_element = resource.bundle.append_new(
             resources_section,
             id_provider,
             bundle_id,
@@ -516,13 +551,16 @@ def bundle_create(
             container_options,
             network_options,
             port_map,
-            storage_map
+            storage_map,
+            meta_attributes
         )
+        if ensure_disabled:
+            resource.common.disable(bundle_element)
 
 def bundle_update(
     env, bundle_id, container_options=None, network_options=None,
     port_map_add=None, port_map_remove=None, storage_map_add=None,
-    storage_map_remove=None,
+    storage_map_remove=None, meta_attributes=None,
     force_options=False,
     wait=False,
 ):
@@ -537,6 +575,7 @@ def bundle_update(
     list of string port_map_remove -- list of port mapping ids to remove
     list of dict storage_map_add -- list of storage mapping options to add
     list of string storage_map_remove -- list of storage mapping ids to remove
+    dict meta_attributes -- meta attributes to update
     bool force_options -- return warnings instead of forceable errors
     mixed wait -- False: no wait, None: wait default timeout, int: wait timeout
     """
@@ -546,15 +585,16 @@ def bundle_update(
     port_map_remove = port_map_remove or []
     storage_map_add = storage_map_add or []
     storage_map_remove = storage_map_remove or []
+    meta_attributes = meta_attributes or {}
 
     with resource_environment(
         env,
         wait,
         [bundle_id],
-        # bundles are always enabled, currently there is no way to disable them
-        disabled_after_wait=False,
         required_cib_version=(2, 8, 0)
     ) as resources_section:
+        # no need to run validations related to remote and guest nodes as those
+        # nodes can only be created from primitive resources
         id_provider = IdProvider(resources_section)
         bundle_element = find_element_by_tag_and_id(
             resource.bundle.TAG,
@@ -571,6 +611,7 @@ def bundle_update(
                 port_map_remove,
                 storage_map_add,
                 storage_map_remove,
+                # TODO meta attributes - there is no validation for now
                 force_options
             )
         )
@@ -582,7 +623,8 @@ def bundle_update(
             port_map_add,
             port_map_remove,
             storage_map_add,
-            storage_map_remove
+            storage_map_remove,
+            meta_attributes
         )
 
 def disable(env, resource_ids, wait):
@@ -593,7 +635,7 @@ def disable(env, resource_ids, wait):
     mixed wait -- False: no wait, None: wait default timeout, int: wait timeout
     """
     with resource_environment(
-        env, wait, resource_ids, True
+        env, wait, resource_ids, _ensure_disabled_after_wait(True)
     ) as resources_section:
         resource_el_list = _find_resources_or_raise(
             resources_section,
@@ -615,7 +657,7 @@ def enable(env, resource_ids, wait):
     mixed wait -- False: no wait, None: wait default timeout, int: wait timeout
     """
     with resource_environment(
-        env, wait, resource_ids, False
+        env, wait, resource_ids, _ensure_disabled_after_wait(False)
     ) as resources_section:
         resource_el_list = _find_resources_or_raise(
             resources_section,
@@ -642,7 +684,7 @@ def _resource_list_enable_disable(resource_el_list, func, cluster_state):
             report_list.append(
                 reports.id_not_found(
                     res_id,
-                    id_description="resource/clone/master/group"
+                    id_description="resource/clone/master/group/bundle"
                )
             )
     return report_list
@@ -735,7 +777,7 @@ def _find_resources_or_raise(
     resource_tags = (
         resource.clone.ALL_TAGS
         +
-        [resource.group.TAG, resource.primitive.TAG]
+        [resource.group.TAG, resource.primitive.TAG, resource.bundle.TAG]
     )
     for res_id in resource_ids:
         try:
@@ -745,7 +787,7 @@ def _find_resources_or_raise(
                         resource_tags,
                         resources_section,
                         res_id,
-                        id_description="resource/clone/master/group"
+                        id_description="resource/clone/master/group/bundle"
                     )
                 )
             )
diff --git a/pcs/lib/commands/test/resource/fixture.py b/pcs/lib/commands/test/resource/fixture.py
index f1fe09b..8d96dc9 100644
--- a/pcs/lib/commands/test/resource/fixture.py
+++ b/pcs/lib/commands/test/resource/fixture.py
@@ -145,7 +145,7 @@ def report_not_found(res_id, context_type=""):
             "context_type": context_type,
             "context_id": "",
             "id": res_id,
-            "id_description": "resource/clone/master/group",
+            "id_description": "resource/clone/master/group/bundle",
         },
         None
     )
diff --git a/pcs/lib/commands/test/resource/test_bundle_create.py b/pcs/lib/commands/test/resource/test_bundle_create.py
index b9922d8..3bdeee9 100644
--- a/pcs/lib/commands/test/resource/test_bundle_create.py
+++ b/pcs/lib/commands/test/resource/test_bundle_create.py
@@ -40,7 +40,7 @@ class MinimalCreate(CommonTest):
             self.fixture_cib_pre,
             lambda: resource.bundle_create(
                 self.env, "B1", "docker",
-                {"image": "pcs:test", }
+                container_options={"image": "pcs:test", }
             ),
             self.fixture_resources_bundle_simple
         )
@@ -90,7 +90,7 @@ class MinimalCreate(CommonTest):
 
         resource.bundle_create(
             self.env, "B1", "docker",
-            {"image": "pcs:test", }
+            container_options={"image": "pcs:test", }
         )
 
         self.env.report_processor.assert_reports([
@@ -122,7 +122,7 @@ class CreateDocker(CommonTest):
             self.fixture_cib_pre,
             lambda: resource.bundle_create(
                 self.env, "B1", "docker",
-                {"image": "pcs:test", }
+                container_options={"image": "pcs:test", }
             ),
             self.fixture_resources_bundle_simple
         )
@@ -132,7 +132,7 @@ class CreateDocker(CommonTest):
             self.fixture_cib_pre,
             lambda: resource.bundle_create(
                 self.env, "B1", "docker",
-                {
+                container_options={
                     "image": "pcs:test",
                     "masters": "0",
                     "network": "extra network settings",
@@ -168,7 +168,7 @@ class CreateDocker(CommonTest):
         assert_raise_library_error(
             lambda: resource.bundle_create(
                 self.env, "B1", "docker",
-                {
+                container_options={
                     "replicas-per-host": "0",
                     "replicas": "0",
                     "masters": "-1",
@@ -226,7 +226,7 @@ class CreateDocker(CommonTest):
         assert_raise_library_error(
             lambda: resource.bundle_create(
                 self.env, "B1", "docker",
-                {
+                container_options={
                     "image": "",
                 },
                 force_options=True
@@ -253,7 +253,7 @@ class CreateDocker(CommonTest):
         assert_raise_library_error(
             lambda: resource.bundle_create(
                 self.env, "B1", "docker",
-                {
+                container_options={
                     "image": "pcs:test",
                     "extra": "option",
                 }
@@ -276,7 +276,7 @@ class CreateDocker(CommonTest):
             self.fixture_cib_pre,
             lambda: resource.bundle_create(
                 self.env, "B1", "docker",
-                {
+                container_options={
                     "image": "pcs:test",
                     "extra": "option",
                 },
@@ -932,13 +932,61 @@ class CreateWithStorageMap(CommonTest):
         )
 
 
+class CreateWithMeta(CommonTest):
+    def test_success(self):
+        self.assert_command_effect(
+            self.fixture_cib_pre,
+            lambda: resource.bundle_create(
+                self.env, "B1", "docker",
+                container_options={"image": "pcs:test", },
+                meta_attributes={
+                    "target-role": "Stopped",
+                    "is-managed": "false",
+                }
+            ),
+            """
+                <resources>
+                    <bundle id="B1">
+                        <docker image="pcs:test" />
+                        <meta_attributes id="B1-meta_attributes">
+                            <nvpair id="B1-meta_attributes-is-managed"
+                                name="is-managed" value="false" />
+                            <nvpair id="B1-meta_attributes-target-role"
+                                name="target-role" value="Stopped" />
+                        </meta_attributes>
+                    </bundle>
+                </resources>
+            """
+        )
+
+    def test_disabled(self):
+        self.assert_command_effect(
+            self.fixture_cib_pre,
+            lambda: resource.bundle_create(
+                self.env, "B1", "docker",
+                container_options={"image": "pcs:test", },
+                ensure_disabled=True
+            ),
+            """
+                <resources>
+                    <bundle id="B1">
+                        <meta_attributes id="B1-meta_attributes">
+                            <nvpair id="B1-meta_attributes-target-role"
+                                name="target-role" value="Stopped" />
+                        </meta_attributes>
+                        <docker image="pcs:test" />
+                    </bundle>
+                </resources>
+            """
+        )
+
 class CreateWithAllOptions(CommonTest):
     def test_success(self):
         self.assert_command_effect(
             self.fixture_cib_pre,
             lambda: resource.bundle_create(
                 self.env, "B1", "docker",
-                {
+                container_options={
                     "image": "pcs:test",
                     "masters": "0",
                     "network": "extra network settings",
@@ -947,13 +995,13 @@ class CreateWithAllOptions(CommonTest):
                     "replicas": "4",
                     "replicas-per-host": "2",
                 },
-                {
+                network_options={
                     "control-port": "12345",
                     "host-interface": "eth0",
                     "host-netmask": "24",
                     "ip-range-start": "192.168.100.200",
                 },
-                [
+                port_map=[
                     {
                         "port": "1001",
                     },
@@ -967,7 +1015,7 @@ class CreateWithAllOptions(CommonTest):
                         "range": "3000-3300",
                     },
                 ],
-                [
+                storage_map=[
                     {
                         "source-dir": "/tmp/docker1a",
                         "target-dir": "/tmp/docker1b",
@@ -1082,21 +1130,26 @@ class Wait(CommonTest):
         </resources>
     """
 
-    timeout = 10
+    fixture_resources_bundle_simple_disabled = """
+        <resources>
+            <bundle id="B1">
+                <meta_attributes id="B1-meta_attributes">
+                    <nvpair id="B1-meta_attributes-target-role"
+                        name="target-role" value="Stopped" />
+                </meta_attributes>
+                <docker image="pcs:test" />
+            </bundle>
+        </resources>
+    """
 
-    def fixture_calls_initial(self):
-        return (
-            fixture.call_wait_supported() +
-            fixture.calls_cib(
-                self.fixture_cib_pre,
-                self.fixture_resources_bundle_simple,
-                cib_base_file=self.cib_base_file,
-            )
-        )
+    timeout = 10
 
-    def simple_bundle_create(self, wait=False):
+    def simple_bundle_create(self, wait=False, disabled=False):
         return resource.bundle_create(
-            self.env, "B1", "docker", {"image": "pcs:test"}, wait=wait,
+            self.env, "B1", "docker",
+            container_options={"image": "pcs:test"},
+            ensure_disabled=disabled,
+            wait=wait,
         )
 
     def test_wait_fail(self):
@@ -1108,7 +1161,14 @@ class Wait(CommonTest):
             """
         )
         self.runner.set_runs(
-            self.fixture_calls_initial() +
+            fixture.call_wait_supported()
+            +
+            fixture.calls_cib(
+                self.fixture_cib_pre,
+                self.fixture_resources_bundle_simple,
+                cib_base_file=self.cib_base_file,
+            )
+            +
             fixture.call_wait(self.timeout, 62, fixture_wait_timeout_error)
         )
         assert_raise_library_error(
@@ -1122,8 +1182,16 @@ class Wait(CommonTest):
     @skip_unless_pacemaker_supports_bundle
     def test_wait_ok_run_ok(self):
         self.runner.set_runs(
-            self.fixture_calls_initial() +
-            fixture.call_wait(self.timeout) +
+            fixture.call_wait_supported()
+            +
+            fixture.calls_cib(
+                self.fixture_cib_pre,
+                self.fixture_resources_bundle_simple,
+                cib_base_file=self.cib_base_file,
+            )
+            +
+            fixture.call_wait(self.timeout)
+            +
             fixture.call_status(fixture.state_complete(
                 self.fixture_status_running
             ))
@@ -1139,8 +1207,16 @@ class Wait(CommonTest):
     @skip_unless_pacemaker_supports_bundle
     def test_wait_ok_run_fail(self):
         self.runner.set_runs(
-            self.fixture_calls_initial() +
-            fixture.call_wait(self.timeout) +
+            fixture.call_wait_supported()
+            +
+            fixture.calls_cib(
+                self.fixture_cib_pre,
+                self.fixture_resources_bundle_simple,
+                cib_base_file=self.cib_base_file,
+            )
+            +
+            fixture.call_wait(self.timeout)
+            +
             fixture.call_status(fixture.state_complete(
                 self.fixture_status_not_running
             ))
@@ -1150,3 +1226,48 @@ class Wait(CommonTest):
             fixture.report_resource_not_running("B1", severities.ERROR),
         )
         self.runner.assert_everything_launched()
+
+    @skip_unless_pacemaker_supports_bundle
+    def test_disabled_wait_ok_run_ok(self):
+        self.runner.set_runs(
+            fixture.call_wait_supported()
+            +
+            fixture.calls_cib(
+                self.fixture_cib_pre,
+                self.fixture_resources_bundle_simple_disabled,
+                cib_base_file=self.cib_base_file,
+            )
+            +
+            fixture.call_wait(self.timeout)
+            +
+            fixture.call_status(fixture.state_complete(
+                self.fixture_status_not_running
+            ))
+        )
+        self.simple_bundle_create(self.timeout, disabled=True)
+        self.runner.assert_everything_launched()
+
+    @skip_unless_pacemaker_supports_bundle
+    def test_disabled_wait_ok_run_fail(self):
+        self.runner.set_runs(
+            fixture.call_wait_supported()
+            +
+            fixture.calls_cib(
+                self.fixture_cib_pre,
+                self.fixture_resources_bundle_simple_disabled,
+                cib_base_file=self.cib_base_file,
+            )
+            +
+            fixture.call_wait(self.timeout)
+            +
+            fixture.call_status(fixture.state_complete(
+                self.fixture_status_running
+            ))
+        )
+        assert_raise_library_error(
+            lambda: self.simple_bundle_create(self.timeout, disabled=True),
+            fixture.report_resource_running(
+                "B1", {"Started": ["node1", "node2"]}, severities.ERROR
+            )
+        )
+        self.runner.assert_everything_launched()
diff --git a/pcs/lib/commands/test/resource/test_bundle_update.py b/pcs/lib/commands/test/resource/test_bundle_update.py
index 55cfa7b..7a1ee49 100644
--- a/pcs/lib/commands/test/resource/test_bundle_update.py
+++ b/pcs/lib/commands/test/resource/test_bundle_update.py
@@ -709,6 +709,96 @@ class StorageMap(CommonTest):
         self.runner.assert_everything_launched()
 
 
+class Meta(CommonTest):
+    fixture_no_meta = """
+        <resources>
+            <bundle id="B1">
+                <docker image="pcs:test" masters="3" replicas="6"/>
+            </bundle>
+        </resources>
+    """
+
+    fixture_meta_stopped = """
+        <resources>
+            <bundle id="B1">
+                <meta_attributes id="B1-meta_attributes">
+                <nvpair id="B1-meta_attributes-target-role"
+                    name="target-role" value="Stopped" />
+                </meta_attributes>
+                <docker image="pcs:test" masters="3" replicas="6"/>
+            </bundle>
+        </resources>
+    """
+
+    def test_add_meta_element(self):
+        self.assert_command_effect(
+            self.fixture_no_meta,
+            lambda: resource.bundle_update(
+                self.env, "B1",
+                meta_attributes={
+                    "target-role": "Stopped",
+                }
+            ),
+            self.fixture_meta_stopped
+        )
+
+    def test_remove_meta_element(self):
+        self.assert_command_effect(
+            self.fixture_meta_stopped,
+            lambda: resource.bundle_update(
+                self.env, "B1",
+                meta_attributes={
+                    "target-role": "",
+                }
+            ),
+            self.fixture_no_meta
+        )
+
+    def test_change_meta(self):
+        fixture_cib_pre = """
+            <resources>
+                <bundle id="B1">
+                    <meta_attributes id="B1-meta_attributes">
+                    <nvpair id="B1-meta_attributes-target-role"
+                        name="target-role" value="Stopped" />
+                    <nvpair id="B1-meta_attributes-priority"
+                        name="priority" value="15" />
+                    <nvpair id="B1-meta_attributes-is-managed"
+                        name="is-managed" value="false" />
+                    </meta_attributes>
+                    <docker image="pcs:test" masters="3" replicas="6"/>
+                </bundle>
+            </resources>
+        """
+        fixture_cib_post = """
+            <resources>
+                <bundle id="B1">
+                    <meta_attributes id="B1-meta_attributes">
+                    <nvpair id="B1-meta_attributes-target-role"
+                        name="target-role" value="Stopped" />
+                    <nvpair id="B1-meta_attributes-priority"
+                        name="priority" value="10" />
+                    <nvpair id="B1-meta_attributes-resource-stickiness"
+                        name="resource-stickiness" value="100" />
+                    </meta_attributes>
+                    <docker image="pcs:test" masters="3" replicas="6"/>
+                </bundle>
+            </resources>
+        """
+        self.assert_command_effect(
+            fixture_cib_pre,
+            lambda: resource.bundle_update(
+                self.env, "B1",
+                meta_attributes={
+                    "priority": "10",
+                    "resource-stickiness": "100",
+                    "is-managed": "",
+                }
+            ),
+            fixture_cib_post
+        )
+
+
 class Wait(CommonTest):
     fixture_status_running = """
         <resources>
@@ -794,7 +884,7 @@ class Wait(CommonTest):
         self.runner.assert_everything_launched()
 
     @skip_unless_pacemaker_supports_bundle
-    def test_wait_ok_run_ok(self):
+    def test_wait_ok_running(self):
         self.runner.set_runs(
             self.fixture_calls_initial() +
             fixture.call_wait(self.timeout) +
@@ -811,7 +901,7 @@ class Wait(CommonTest):
         self.runner.assert_everything_launched()
 
     @skip_unless_pacemaker_supports_bundle
-    def test_wait_ok_run_fail(self):
+    def test_wait_ok_not_running(self):
         self.runner.set_runs(
             self.fixture_calls_initial() +
             fixture.call_wait(self.timeout) +
@@ -819,8 +909,8 @@ class Wait(CommonTest):
                 self.fixture_status_not_running
             ))
         )
-        assert_raise_library_error(
-            lambda: self.simple_bundle_update(self.timeout),
-            fixture.report_resource_not_running("B1", severities.ERROR),
-        )
+        self.simple_bundle_update(self.timeout)
+        self.env.report_processor.assert_reports([
+            fixture.report_resource_not_running("B1", severities.INFO),
+        ])
         self.runner.assert_everything_launched()
diff --git a/pcs/lib/commands/test/resource/test_resource_enable_disable.py b/pcs/lib/commands/test/resource/test_resource_enable_disable.py
index 91ac068..b03740b 100644
--- a/pcs/lib/commands/test/resource/test_resource_enable_disable.py
+++ b/pcs/lib/commands/test/resource/test_resource_enable_disable.py
@@ -469,6 +469,35 @@ fixture_bundle_cib_disabled_primitive = """
         </bundle>
     </resources>
 """
+fixture_bundle_cib_disabled_bundle = """
+    <resources>
+        <bundle id="A-bundle">
+            <meta_attributes id="A-bundle-meta_attributes">
+                <nvpair id="A-bundle-meta_attributes-target-role"
+                    name="target-role" value="Stopped" />
+            </meta_attributes>
+            <docker image="pcs:test" />
+            <primitive id="A" class="ocf" provider="heartbeat" type="Dummy" />
+        </bundle>
+    </resources>
+"""
+fixture_bundle_cib_disabled_both = """
+    <resources>
+        <bundle id="A-bundle">
+            <meta_attributes id="A-bundle-meta_attributes">
+                <nvpair id="A-bundle-meta_attributes-target-role"
+                    name="target-role" value="Stopped" />
+            </meta_attributes>
+            <docker image="pcs:test" />
+            <primitive id="A" class="ocf" provider="heartbeat" type="Dummy">
+                <meta_attributes id="A-meta_attributes">
+                    <nvpair id="A-meta_attributes-target-role"
+                        name="target-role" value="Stopped" />
+                </meta_attributes>
+            </primitive>
+        </bundle>
+    </resources>
+"""
 fixture_bundle_status_managed = """
     <resources>
         <bundle id="A-bundle" type="docker" image="pcmktest:http"
@@ -486,7 +515,7 @@ fixture_bundle_status_managed = """
 fixture_bundle_status_unmanaged = """
     <resources>
         <bundle id="A-bundle" type="docker" image="pcmktest:http"
-            unique="false" managed="true" failed="false"
+            unique="false" managed="false" failed="false"
         >
             <replica id="0">
                 <resource id="A" managed="false" />
@@ -1460,17 +1489,12 @@ class DisableBundle(ResourceWithStateTest):
         )
 
     def test_bundle(self):
-        self.runner.set_runs(
-            fixture.call_cib_load(
-                fixture.cib_resources(fixture_bundle_cib_enabled)
-            )
-        )
-
-        assert_raise_library_error(
+        self.assert_command_effect(
+            fixture_bundle_cib_enabled,
+            fixture_bundle_status_managed,
             lambda: resource.disable(self.env, ["A-bundle"], False),
-            fixture.report_not_for_bundles("A-bundle")
+            fixture_bundle_cib_disabled_bundle
         )
-        self.runner.assert_everything_launched()
 
     def test_primitive_unmanaged(self):
         self.assert_command_effect(
@@ -1483,6 +1507,17 @@ class DisableBundle(ResourceWithStateTest):
             ]
         )
 
+    def test_bundle_unmanaged(self):
+        self.assert_command_effect(
+            fixture_bundle_cib_enabled,
+            fixture_bundle_status_unmanaged,
+            lambda: resource.disable(self.env, ["A-bundle"], False),
+            fixture_bundle_cib_disabled_bundle,
+            reports=[
+                fixture_report_unmanaged("A-bundle"),
+            ]
+        )
+
 
 @skip_unless_pacemaker_supports_bundle
 class EnableBundle(ResourceWithStateTest):
@@ -1494,18 +1529,29 @@ class EnableBundle(ResourceWithStateTest):
             fixture_bundle_cib_enabled
         )
 
+    def test_primitive_disabled_both(self):
+        self.assert_command_effect(
+            fixture_bundle_cib_disabled_both,
+            fixture_bundle_status_managed,
+            lambda: resource.enable(self.env, ["A"], False),
+            fixture_bundle_cib_enabled
+        )
+
     def test_bundle(self):
-        self.runner.set_runs(
-            fixture.call_cib_load(
-                fixture.cib_resources(fixture_bundle_cib_enabled)
-            )
+        self.assert_command_effect(
+            fixture_bundle_cib_disabled_bundle,
+            fixture_bundle_status_managed,
+            lambda: resource.enable(self.env, ["A-bundle"], False),
+            fixture_bundle_cib_enabled
         )
 
-        assert_raise_library_error(
+    def test_bundle_disabled_both(self):
+        self.assert_command_effect(
+            fixture_bundle_cib_disabled_both,
+            fixture_bundle_status_managed,
             lambda: resource.enable(self.env, ["A-bundle"], False),
-            fixture.report_not_for_bundles("A-bundle")
+            fixture_bundle_cib_enabled
         )
-        self.runner.assert_everything_launched()
 
     def test_primitive_unmanaged(self):
         self.assert_command_effect(
@@ -1515,5 +1561,18 @@ class EnableBundle(ResourceWithStateTest):
             fixture_bundle_cib_enabled,
             reports=[
                 fixture_report_unmanaged("A"),
+                fixture_report_unmanaged("A-bundle"),
+            ]
+        )
+
+    def test_bundle_unmanaged(self):
+        self.assert_command_effect(
+            fixture_bundle_cib_disabled_primitive,
+            fixture_bundle_status_unmanaged,
+            lambda: resource.enable(self.env, ["A-bundle"], False),
+            fixture_bundle_cib_enabled,
+            reports=[
+                fixture_report_unmanaged("A-bundle"),
+                fixture_report_unmanaged("A"),
             ]
         )
diff --git a/pcs/lib/commands/test/resource/test_resource_manage_unmanage.py b/pcs/lib/commands/test/resource/test_resource_manage_unmanage.py
index 6d8c787..95b44bc 100644
--- a/pcs/lib/commands/test/resource/test_resource_manage_unmanage.py
+++ b/pcs/lib/commands/test/resource/test_resource_manage_unmanage.py
@@ -517,6 +517,26 @@ fixture_clone_group_cib_unmanaged_all_primitives_op_disabled = """
     </resources>
 """
 
+
+fixture_bundle_empty_cib_managed = """
+    <resources>
+        <bundle id="A-bundle">
+            <docker image="pcs:test" />
+        </bundle>
+    </resources>
+"""
+fixture_bundle_empty_cib_unmanaged_bundle = """
+    <resources>
+        <bundle id="A-bundle">
+            <meta_attributes id="A-bundle-meta_attributes">
+                <nvpair id="A-bundle-meta_attributes-is-managed"
+                    name="is-managed" value="false" />
+            </meta_attributes>
+            <docker image="pcs:test" />
+        </bundle>
+    </resources>
+"""
+
 fixture_bundle_cib_managed = """
     <resources>
         <bundle id="A-bundle">
@@ -526,7 +546,19 @@ fixture_bundle_cib_managed = """
         </bundle>
     </resources>
 """
-
+fixture_bundle_cib_unmanaged_bundle = """
+    <resources>
+        <bundle id="A-bundle">
+            <meta_attributes id="A-bundle-meta_attributes">
+                <nvpair id="A-bundle-meta_attributes-is-managed"
+                    name="is-managed" value="false" />
+            </meta_attributes>
+            <docker image="pcs:test" />
+            <primitive id="A" class="ocf" provider="heartbeat" type="Dummy">
+            </primitive>
+        </bundle>
+    </resources>
+"""
 fixture_bundle_cib_unmanaged_primitive = """
     <resources>
         <bundle id="A-bundle">
@@ -540,6 +572,78 @@ fixture_bundle_cib_unmanaged_primitive = """
         </bundle>
     </resources>
 """
+fixture_bundle_cib_unmanaged_both = """
+    <resources>
+        <bundle id="A-bundle">
+            <meta_attributes id="A-bundle-meta_attributes">
+                <nvpair id="A-bundle-meta_attributes-is-managed"
+                    name="is-managed" value="false" />
+            </meta_attributes>
+            <docker image="pcs:test" />
+            <primitive id="A" class="ocf" provider="heartbeat" type="Dummy">
+                <meta_attributes id="A-meta_attributes">
+                    <nvpair id="A-meta_attributes-is-managed"
+                        name="is-managed" value="false" />
+                </meta_attributes>
+            </primitive>
+        </bundle>
+    </resources>
+"""
+
+fixture_bundle_cib_managed_op_enabled = """
+    <resources>
+        <bundle id="A-bundle">
+            <docker image="pcs:test" />
+            <primitive id="A" class="ocf" provider="heartbeat" type="Dummy">
+                <operations>
+                    <op id="A-start" name="start" />
+                    <op id="A-stop" name="stop" />
+                    <op id="A-monitor" name="monitor"/>
+                </operations>
+            </primitive>
+        </bundle>
+    </resources>
+"""
+fixture_bundle_cib_unmanaged_primitive_op_disabled = """
+    <resources>
+        <bundle id="A-bundle">
+            <docker image="pcs:test" />
+            <primitive id="A" class="ocf" provider="heartbeat" type="Dummy">
+                <meta_attributes id="A-meta_attributes">
+                    <nvpair id="A-meta_attributes-is-managed"
+                        name="is-managed" value="false" />
+                </meta_attributes>
+                <operations>
+                    <op id="A-start" name="start" />
+                    <op id="A-stop" name="stop" />
+                    <op id="A-monitor" name="monitor" enabled="false"/>
+                </operations>
+            </primitive>
+        </bundle>
+    </resources>
+"""
+fixture_bundle_cib_unmanaged_both_op_disabled = """
+    <resources>
+        <bundle id="A-bundle">
+            <meta_attributes id="A-bundle-meta_attributes">
+                <nvpair id="A-bundle-meta_attributes-is-managed"
+                    name="is-managed" value="false" />
+            </meta_attributes>
+            <docker image="pcs:test" />
+            <primitive id="A" class="ocf" provider="heartbeat" type="Dummy">
+                <meta_attributes id="A-meta_attributes">
+                    <nvpair id="A-meta_attributes-is-managed"
+                        name="is-managed" value="false" />
+                </meta_attributes>
+                <operations>
+                    <op id="A-start" name="start" />
+                    <op id="A-stop" name="stop" />
+                    <op id="A-monitor" name="monitor" enabled="false"/>
+                </operations>
+            </primitive>
+        </bundle>
+    </resources>
+"""
 
 def fixture_report_no_monitors(resource):
     return (
@@ -852,17 +956,18 @@ class UnmanageBundle(ResourceWithoutStateTest):
         )
 
     def test_bundle(self):
-        self.runner.set_runs(
-            fixture.call_cib_load(
-                fixture.cib_resources(fixture_bundle_cib_managed)
-            )
+        self.assert_command_effect(
+            fixture_bundle_cib_managed,
+            lambda: resource.unmanage(self.env, ["A-bundle"]),
+            fixture_bundle_cib_unmanaged_both
         )
 
-        assert_raise_library_error(
-            lambda: resource.unmanage(self.env, ["A-bundle"], False),
-            fixture.report_not_for_bundles("A-bundle")
+    def test_bundle_empty(self):
+        self.assert_command_effect(
+            fixture_bundle_empty_cib_managed,
+            lambda: resource.unmanage(self.env, ["A-bundle"]),
+            fixture_bundle_empty_cib_unmanaged_bundle
         )
-        self.runner.assert_everything_launched()
 
 
 class ManageBundle(ResourceWithoutStateTest):
@@ -873,18 +978,47 @@ class ManageBundle(ResourceWithoutStateTest):
             fixture_bundle_cib_managed,
         )
 
+    def test_primitive_unmanaged_bundle(self):
+        self.assert_command_effect(
+            fixture_bundle_cib_unmanaged_bundle,
+            lambda: resource.manage(self.env, ["A"]),
+            fixture_bundle_cib_managed,
+        )
+
+    def test_primitive_unmanaged_both(self):
+        self.assert_command_effect(
+            fixture_bundle_cib_unmanaged_both,
+            lambda: resource.manage(self.env, ["A"]),
+            fixture_bundle_cib_managed,
+        )
+
     def test_bundle(self):
-        self.runner.set_runs(
-            fixture.call_cib_load(
-                fixture.cib_resources(fixture_bundle_cib_unmanaged_primitive)
-            )
+        self.assert_command_effect(
+            fixture_bundle_cib_unmanaged_bundle,
+            lambda: resource.manage(self.env, ["A-bundle"]),
+            fixture_bundle_cib_managed,
         )
 
-        assert_raise_library_error(
-            lambda: resource.manage(self.env, ["A-bundle"], False),
-            fixture.report_not_for_bundles("A-bundle")
+    def test_bundle_unmanaged_primitive(self):
+        self.assert_command_effect(
+            fixture_bundle_cib_unmanaged_primitive,
+            lambda: resource.manage(self.env, ["A-bundle"]),
+            fixture_bundle_cib_managed,
+        )
+
+    def test_bundle_unmanaged_both(self):
+        self.assert_command_effect(
+            fixture_bundle_cib_unmanaged_both,
+            lambda: resource.manage(self.env, ["A-bundle"]),
+            fixture_bundle_cib_managed,
+        )
+
+    def test_bundle_empty(self):
+        self.assert_command_effect(
+            fixture_bundle_empty_cib_unmanaged_bundle,
+            lambda: resource.manage(self.env, ["A-bundle"]),
+            fixture_bundle_empty_cib_managed
         )
-        self.runner.assert_everything_launched()
 
 
 class MoreResources(ResourceWithoutStateTest):
@@ -1090,3 +1224,24 @@ class WithMonitor(ResourceWithoutStateTest):
             lambda: resource.unmanage(self.env, ["A1"], True),
             fixture_clone_group_cib_unmanaged_primitive_op_disabled
         )
+
+    def test_unmanage_bundle(self):
+        self.assert_command_effect(
+            fixture_bundle_cib_managed_op_enabled,
+            lambda: resource.unmanage(self.env, ["A-bundle"], True),
+            fixture_bundle_cib_unmanaged_both_op_disabled
+        )
+
+    def test_unmanage_in_bundle(self):
+        self.assert_command_effect(
+            fixture_bundle_cib_managed_op_enabled,
+            lambda: resource.unmanage(self.env, ["A"], True),
+            fixture_bundle_cib_unmanaged_primitive_op_disabled
+        )
+
+    def test_unmanage_bundle_empty(self):
+        self.assert_command_effect(
+            fixture_bundle_empty_cib_managed,
+            lambda: resource.unmanage(self.env, ["A-bundle"], True),
+            fixture_bundle_empty_cib_unmanaged_bundle
+        )
diff --git a/pcs/lib/pacemaker/state.py b/pcs/lib/pacemaker/state.py
index 71809db..be3e7ad 100644
--- a/pcs/lib/pacemaker/state.py
+++ b/pcs/lib/pacemaker/state.py
@@ -201,6 +201,25 @@ def _get_primitive_roles_with_nodes(primitive_el_list):
         for role, nodes in roles_with_nodes.items()
     ])
 
+def info_resource_state(cluster_state, resource_id):
+    roles_with_nodes = _get_primitive_roles_with_nodes(
+        _get_primitives_for_state_check(
+            cluster_state,
+            resource_id,
+            expected_running=True
+        )
+    )
+    if not roles_with_nodes:
+        return reports.resource_does_not_run(
+            resource_id,
+            severities.INFO
+        )
+    return reports.resource_running_on_nodes(
+        resource_id,
+        roles_with_nodes,
+        severities.INFO
+    )
+
 def ensure_resource_state(expected_running, cluster_state, resource_id):
     roles_with_nodes = _get_primitive_roles_with_nodes(
         _get_primitives_for_state_check(
@@ -244,18 +263,25 @@ def is_resource_managed(cluster_state, resource_id):
         for primitive in primitive_list:
             if is_false(primitive.attrib.get("managed", "")):
                 return False
-            clone = find_parent(primitive, ["clone"])
-            if clone is not None and is_false(clone.attrib.get("managed", "")):
+            parent = find_parent(primitive, ["clone", "bundle"])
+            if (
+                parent is not None
+                and
+                is_false(parent.attrib.get("managed", ""))
+            ):
                 return False
         return True
 
-    clone_list = cluster_state.xpath(
-        """.//clone[@id="{0}"]""".format(resource_id)
+    parent_list = cluster_state.xpath("""
+        .//clone[@id="{0}"]
+        |
+        .//bundle[@id="{0}"]
+        """.format(resource_id)
     )
-    for clone in clone_list:
-        if is_false(clone.attrib.get("managed", "")):
+    for parent in parent_list:
+        if is_false(parent.attrib.get("managed", "")):
             return False
-        for primitive in clone.xpath(".//resource"):
+        for primitive in parent.xpath(".//resource"):
             if is_false(primitive.attrib.get("managed", "")):
                 return False
         return True
diff --git a/pcs/lib/pacemaker/test/test_state.py b/pcs/lib/pacemaker/test/test_state.py
index a29eddf..5de9426 100644
--- a/pcs/lib/pacemaker/test/test_state.py
+++ b/pcs/lib/pacemaker/test/test_state.py
@@ -491,7 +491,7 @@ class GetPrimitivesForStateCheck(TestCase):
         self.assert_primitives("B2-R2", ["B2-R2", "B2-R2"], False)
 
 
-class EnsureResourceState(TestCase):
+class CommonResourceState(TestCase):
     resource_id = "R"
     def setUp(self):
         self.cluster_state = "state"
@@ -526,6 +526,8 @@ class EnsureResourceState(TestCase):
             "resource_id": self.resource_id
         })
 
+
+class EnsureResourceState(CommonResourceState):
     def assert_running_info_transform(self, run_info, report, expected_running):
         self.get_primitives_for_state_check.return_value = ["elem1", "elem2"]
         self.get_primitive_roles_with_nodes.return_value = run_info
@@ -575,6 +577,35 @@ class EnsureResourceState(TestCase):
         )
 
 
+class InfoResourceState(CommonResourceState):
+    def assert_running_info_transform(self, run_info, report):
+        self.get_primitives_for_state_check.return_value = ["elem1", "elem2"]
+        self.get_primitive_roles_with_nodes.return_value = run_info
+        assert_report_item_equal(
+            state.info_resource_state(self.cluster_state, self.resource_id),
+            report
+        )
+        self.get_primitives_for_state_check.assert_called_once_with(
+            self.cluster_state,
+            self.resource_id,
+            expected_running=True
+        )
+        self.get_primitive_roles_with_nodes.assert_called_once_with(
+            ["elem1", "elem2"]
+        )
+
+    def test_report_info_running(self):
+        self.assert_running_info_transform(
+            self.fixture_running_state_info(),
+            self.fixture_running_report(severities.INFO)
+        )
+    def test_report_info_not_running(self):
+        self.assert_running_info_transform(
+            [],
+            self.fixture_not_running_report(severities.INFO)
+        )
+
+
 class IsResourceManaged(TestCase):
     status_xml = etree.fromstring("""
         <resources>
@@ -733,6 +764,60 @@ class IsResourceManaged(TestCase):
                     <resource id="R38:1" managed="false" />
                 </group>
             </clone>
+
+            <bundle id="B1" managed="true" />
+            <bundle id="B2" managed="false" />
+
+            <bundle id="B3" managed="true">
+                <replica id="0">
+                    <resource id="R39" managed="true" />
+                    <resource id="R40" managed="true" />
+                </replica>
+                <replica id="1">
+                    <resource id="R39" managed="true" />
+                    <resource id="R40" managed="true" />
+                </replica>
+            </bundle>
+            <bundle id="B4" managed="false">
+                <replica id="0">
+                    <resource id="R41" managed="true" />
+                    <resource id="R42" managed="true" />
+                </replica>
+                <replica id="1">
+                    <resource id="R41" managed="true" />
+                    <resource id="R42" managed="true" />
+                </replica>
+            </bundle>
+            <bundle id="B5" managed="true">
+                <replica id="0">
+                    <resource id="R43" managed="false" />
+                    <resource id="R44" managed="true" />
+                </replica>
+                <replica id="1">
+                    <resource id="R43" managed="false" />
+                    <resource id="R44" managed="true" />
+                </replica>
+            </bundle>
+            <bundle id="B6" managed="true">
+                <replica id="0">
+                    <resource id="R45" managed="true" />
+                    <resource id="R46" managed="false" />
+                </replica>
+                <replica id="1">
+                    <resource id="R45" managed="true" />
+                    <resource id="R46" managed="false" />
+                </replica>
+            </bundle>
+            <bundle id="B7" managed="false">
+                <replica id="0">
+                    <resource id="R47" managed="false" />
+                    <resource id="R48" managed="false" />
+                </replica>
+                <replica id="1">
+                    <resource id="R47" managed="false" />
+                    <resource id="R48" managed="false" />
+                </replica>
+            </bundle>
         </resources>
     """)
 
@@ -856,3 +941,24 @@ class IsResourceManaged(TestCase):
         self.assert_managed("R36", False)
         self.assert_managed("R37", False)
         self.assert_managed("R38", False)
+
+    def test_bundle(self):
+        self.assert_managed("B1", True)
+        self.assert_managed("B2", False)
+        self.assert_managed("B3", True)
+        self.assert_managed("B4", False)
+        self.assert_managed("B5", False)
+        self.assert_managed("B6", False)
+        self.assert_managed("B7", False)
+
+    def test_primitive_in_bundle(self):
+        self.assert_managed("R39", True)
+        self.assert_managed("R40", True)
+        self.assert_managed("R41", False)
+        self.assert_managed("R42", False)
+        self.assert_managed("R43", False)
+        self.assert_managed("R44", True)
+        self.assert_managed("R45", True)
+        self.assert_managed("R46", False)
+        self.assert_managed("R47", False)
+        self.assert_managed("R48", False)
diff --git a/pcs/pcs.8 b/pcs/pcs.8
index 446e7b3..20b5c2e 100644
--- a/pcs/pcs.8
+++ b/pcs/pcs.8
@@ -162,10 +162,10 @@ Remove the clone which contains the specified group or resource (the resource or
 master [<master/slave id>] <resource id | group id> [options] [\fB\-\-wait\fR[=n]]
 Configure a resource or group as a multi\-state (master/slave) resource.  If \fB\-\-wait\fR is specified, pcs will wait up to 'n' seconds for the operation to finish (including starting and promoting resource instances if appropriate) and then return 0 on success or 1 on error.  If 'n' is not specified it defaults to 60 minutes.  Note: to remove a master you must remove the resource/group it contains.
 .TP
-bundle create <bundle id> [container [<container type>] <container options>] [network <network options>] [port\-map <port options>]... [storage\-map <storage options>]... [\fB\-\-wait\fR[=n]]
-Create a new bundle encapsulating no resources. The bundle can be used either as it is or a resource may be put into it at any time. If the container type is not specified, it defaults to 'docker'.  If \fB\-\-wait\fR is specified, pcs will wait up to 'n' seconds for the bundle to start and then return 0 on success or 1 on error.  If 'n' is not specified it defaults to 60 minutes.
+bundle create <bundle id> [container [<container type>] <container options>] [network <network options>] [port\-map <port options>]... [storage\-map <storage options>]... [meta <meta options>] [\fB\-\-disabled\fR] [\fB\-\-wait\fR[=n]]
+Create a new bundle encapsulating no resources. The bundle can be used either as it is or a resource may be put into it at any time. If the container type is not specified, it defaults to 'docker'. If \fB\-\-disabled\fR is specified, the bundle is not started automatically. If \fB\-\-wait\fR is specified, pcs will wait up to 'n' seconds for the bundle to start and then return 0 on success or 1 on error. If 'n' is not specified it defaults to 60 minutes.
 .TP
-bundle update <bundle id> [container <container options>] [network <network options>] [port\-map (add <port options>) | (remove <id>...)]... [storage\-map (add <storage options>) | (remove <id>...)]... [\fB\-\-wait\fR[=n]]
+bundle update <bundle id> [container <container options>] [network <network options>] [port\-map (add <port options>) | (remove <id>...)]... [storage\-map (add <storage options>) | (remove <id>...)]... [meta <meta options>] [\fB\-\-wait\fR[=n]]
 Add, remove or change options to specified bundle. If you wish to update a resource encapsulated in the bundle, use the 'pcs resource update' command instead and specify the resource id.  If \fB\-\-wait\fR is specified, pcs will wait up to 'n' seconds for the operation to finish (including moving resources if appropriate) and then return 0 on success or 1 on error.  If 'n' is not specified it defaults to 60 minutes.
 .TP
 manage <resource id>... [\fB\-\-monitor\fR]
diff --git a/pcs/resource.py b/pcs/resource.py
index dc6da13..467faa5 100644
--- a/pcs/resource.py
+++ b/pcs/resource.py
@@ -20,7 +20,7 @@ from pcs import (
 )
 from pcs.settings import pacemaker_wait_timeout_status as \
     PACEMAKER_WAIT_TIMEOUT_STATUS
-import pcs.lib.cib.acl as lib_acl
+from pcs.cli.common.console_report import error, warn
 from pcs.cli.common.errors import CmdLineInputError
 from pcs.cli.common.parse_args import prepare_options
 from pcs.cli.resource.parse_args import (
@@ -28,16 +28,21 @@ from pcs.cli.resource.parse_args import (
     parse_bundle_update_options,
     parse_create as parse_create_args,
 )
-from pcs.lib.errors import LibraryError
+import pcs.lib.cib.acl as lib_acl
 from pcs.lib.cib.resource import guest_node
-import pcs.lib.pacemaker.live as lib_pacemaker
-from pcs.lib.pacemaker.values import timeout_to_seconds
-import pcs.lib.resource_agent as lib_ra
-from pcs.cli.common.console_report import error, warn
 from pcs.lib.commands.resource import(
     _validate_guest_change,
     _get_nodes_to_validate_against,
 )
+from pcs.lib.errors import LibraryError
+import pcs.lib.pacemaker.live as lib_pacemaker
+from pcs.lib.pacemaker.state import (
+    get_cluster_state_dom,
+    _get_primitive_roles_with_nodes,
+    _get_primitives_for_state_check,
+)
+from pcs.lib.pacemaker.values import timeout_to_seconds
+import pcs.lib.resource_agent as lib_ra
 
 
 RESOURCE_RELOCATE_CONSTRAINT_PREFIX = "pcs-relocate-"
@@ -1432,6 +1437,18 @@ def resource_master_create(dom, argv, update=False, master_id=None):
     return dom, master_element.getAttribute("id")
 
 def resource_remove(resource_id, output=True, is_remove_remote_context=False):
+    def is_bundle_running(bundle_id):
+        roles_with_nodes = _get_primitive_roles_with_nodes(
+            _get_primitives_for_state_check(
+                get_cluster_state_dom(
+                    lib_pacemaker.get_cluster_status_xml(utils.cmd_runner())
+                ),
+                bundle_id,
+                expected_running=True
+            )
+        )
+        return True if roles_with_nodes else False
+
     dom = utils.get_cib_dom()
     # if resource is a clone or a master, work with its child instead
     cloned_resource = utils.dom_get_clone_ms_resource(dom, resource_id)
@@ -1441,6 +1458,40 @@ def resource_remove(resource_id, output=True, is_remove_remote_context=False):
     bundle = utils.dom_get_bundle(dom, resource_id)
     if bundle is not None:
         primitive_el = utils.dom_get_resource_bundle(bundle)
+        if primitive_el is None:
+            print("Deleting bundle '{0}'".format(resource_id))
+        else:
+            print(
+                "Deleting bundle '{0}' and its inner resource '{1}'".format(
+                    resource_id,
+                    primitive_el.getAttribute("id")
+                )
+            )
+
+        if (
+            "--force" not in utils.pcs_options
+            and
+            not utils.usefile
+            and
+            is_bundle_running(resource_id)
+        ):
+            sys.stdout.write("Stopping bundle '{0}'... ".format(resource_id))
+            sys.stdout.flush()
+            lib = utils.get_library_wrapper()
+            lib.resource.disable([resource_id], False)
+            output, retval = utils.run(["crm_resource", "--wait"])
+            # any pacemaker version that supports bundles also supports --wait
+            if is_bundle_running(resource_id):
+                msg = [
+                    "Unable to stop: %s before deleting "
+                    "(re-run with --force to force deletion)"
+                    % resource_id
+                ]
+                if retval != 0 and output:
+                    msg.append("\n" + output)
+                utils.err("\n".join(msg).strip())
+            print("Stopped")
+
         if primitive_el is not None:
             resource_remove(primitive_el.getAttribute("id"))
         utils.replace_cib_configuration(
@@ -1498,7 +1549,7 @@ def resource_remove(resource_id, output=True, is_remove_remote_context=False):
             resource_remove(res.getAttribute("id"))
         sys.exit(0)
 
-    # now we know resource is not a group, a clone nor a master
+    # now we know resource is not a group, a clone, a master, nor a bundle
     # because of the conditions above
     if not utils.does_exist('//resources/descendant::primitive[@id="'+resource_id+'"]'):
         utils.err("Resource '{0}' does not exist.".format(resource_id))
@@ -1517,7 +1568,7 @@ def resource_remove(resource_id, output=True, is_remove_remote_context=False):
         and
         utils.resource_running_on(resource_id)["is_running"]
     ):
-        sys.stdout.write("Attempting to stop: "+ resource_id + "...")
+        sys.stdout.write("Attempting to stop: "+ resource_id + "... ")
         sys.stdout.flush()
         lib = utils.get_library_wrapper()
         # we are not using wait from disable command, because if wait is not
@@ -2246,6 +2297,7 @@ def print_node(node, tab = 0):
             node.findall("storage/storage-mapping"),
             spaces + " "
         )
+        print_meta_vars_string(node, spaces)
         for child in node:
             print_node(child, tab + 1)
         return
@@ -2675,12 +2727,14 @@ def resource_bundle_create_cmd(lib, argv, modifiers):
     lib.resource.bundle_create(
         bundle_id,
         parts["container_type"],
-        parts["container"],
-        parts["network"],
-        parts["port_map"],
-        parts["storage_map"],
-        modifiers["force"],
-        modifiers["wait"]
+        container_options=parts["container"],
+        network_options=parts["network"],
+        port_map=parts["port_map"],
+        storage_map=parts["storage_map"],
+        meta_attributes=parts["meta"],
+        force_options=modifiers["force"],
+        ensure_disabled=modifiers["disabled"],
+        wait=modifiers["wait"]
     )
 
 def resource_bundle_update_cmd(lib, argv, modifiers):
@@ -2691,12 +2745,13 @@ def resource_bundle_update_cmd(lib, argv, modifiers):
     parts = parse_bundle_update_options(argv[1:])
     lib.resource.bundle_update(
         bundle_id,
-        parts["container"],
-        parts["network"],
-        parts["port_map_add"],
-        parts["port_map_remove"],
-        parts["storage_map_add"],
-        parts["storage_map_remove"],
-        modifiers["force"],
-        modifiers["wait"]
+        container_options=parts["container"],
+        network_options=parts["network"],
+        port_map_add=parts["port_map_add"],
+        port_map_remove=parts["port_map_remove"],
+        storage_map_add=parts["storage_map_add"],
+        storage_map_remove=parts["storage_map_remove"],
+        meta_attributes=parts["meta"],
+        force_options=modifiers["force"],
+        wait=modifiers["wait"]
     )
diff --git a/pcs/test/cib_resource/test_bundle.py b/pcs/test/cib_resource/test_bundle.py
index d8c97c6..29e4339 100644
--- a/pcs/test/cib_resource/test_bundle.py
+++ b/pcs/test/cib_resource/test_bundle.py
@@ -75,6 +75,7 @@ class BundleCreate(BundleCreateCommon):
                 resource bundle create B1
                 container replicas=4 replicas-per-host=2 run-command=/bin/true
                 port-map port=1001
+                meta target-role=Stopped
                 network control-port=12345 host-interface=eth0 host-netmask=24
                 port-map id=B1-port-map-1001 internal-port=2002 port=2000
                 port-map range=3000-3300
@@ -83,6 +84,7 @@ class BundleCreate(BundleCreateCommon):
                 storage-map id=B1-storage-map source-dir=/tmp/docker2a
                     target-dir=/tmp/docker2b
                 container image=pcs:test masters=0
+                meta is-managed=false
                 storage-map source-dir-root=/tmp/docker3a
                     target-dir=/tmp/docker3b
                 storage-map id=B1-port-map-1001-1 source-dir-root=/tmp/docker4a
@@ -140,6 +142,18 @@ class BundleCreate(BundleCreateCommon):
                                 target-dir="/tmp/docker4b"
                             />
                         </storage>
+                        <meta_attributes id="B1-meta_attributes">
+                            <nvpair
+                                id="B1-meta_attributes-is-managed"
+                                name="is-managed"
+                                value="false"
+                            />
+                            <nvpair
+                                id="B1-meta_attributes-target-role"
+                                name="target-role"
+                                value="Stopped"
+                            />
+                        </meta_attributes>
                     </bundle>
                 </resources>
             """
@@ -215,6 +229,9 @@ class BundleCreate(BundleCreateCommon):
     def test_empty_port_map(self):
         self.assert_no_options("port-map")
 
+    def test_empty_meta(self):
+        self.assert_no_options("meta")
+
 
 @skip_unless_pacemaker_supports_bundle
 class BundleUpdate(BundleCreateCommon):
@@ -239,6 +256,7 @@ class BundleUpdate(BundleCreateCommon):
                 "storage-map source-dir=/tmp/docker1a target-dir=/tmp/docker1b "
                 "storage-map source-dir=/tmp/docker2a target-dir=/tmp/docker2b "
                 "storage-map source-dir=/tmp/docker3a target-dir=/tmp/docker3b "
+                "meta priority=15 resource-stickiness=100 is-managed=false "
             ).format(name)
         )
 
@@ -282,6 +300,7 @@ class BundleUpdate(BundleCreateCommon):
                 port-map add internal-port=1003 port=2003
                 storage-map remove B-storage-map B-storage-map-2
                 storage-map add source-dir=/tmp/docker4a target-dir=/tmp/docker4b
+                meta priority=10 is-managed= target-role=Stopped
             """,
             """
                 <resources>
@@ -319,6 +338,14 @@ class BundleUpdate(BundleCreateCommon):
                                 target-dir="/tmp/docker4b"
                             />
                         </storage>
+                        <meta_attributes id="B-meta_attributes">
+                            <nvpair id="B-meta_attributes-priority"
+                                name="priority" value="10" />
+                            <nvpair id="B-meta_attributes-resource-stickiness"
+                                name="resource-stickiness" value="100" />
+                            <nvpair id="B-meta_attributes-target-role"
+                                name="target-role" value="Stopped" />
+                        </meta_attributes>
                     </bundle>
                 </resources>
             """
@@ -373,6 +400,9 @@ class BundleUpdate(BundleCreateCommon):
     def test_empty_port_map(self):
         self.assert_no_options("port-map")
 
+    def test_empty_meta(self):
+        self.assert_no_options("meta")
+
 
 @skip_unless_pacemaker_supports_bundle
 class BundleShow(TestCase, AssertPcsMixin):
@@ -463,6 +493,35 @@ class BundleShow(TestCase, AssertPcsMixin):
             """
         ))
 
+    def test_meta(self):
+        self.assert_pcs_success(
+            "resource bundle create B1 container image=pcs:test --disabled"
+        )
+        self.assert_pcs_success("resource show B1", outdent(
+            # pylint:disable=trailing-whitespace
+            """\
+             Bundle: B1
+              Docker: image=pcs:test
+              Meta Attrs: target-role=Stopped 
+            """
+        ))
+
+    def test_resource(self):
+        self.assert_pcs_success(
+            "resource bundle create B1 container image=pcs:test"
+        )
+        self.assert_pcs_success(
+            "resource create A ocf:pacemaker:Dummy bundle B1 --no-default-ops"
+        )
+        self.assert_pcs_success("resource show B1", outdent(
+            """\
+             Bundle: B1
+              Docker: image=pcs:test
+              Resource: A (class=ocf provider=pacemaker type=Dummy)
+               Operations: monitor interval=10 timeout=20 (A-monitor-interval-10)
+            """
+        ))
+
     def test_all(self):
         self.assert_pcs_success(
             """
@@ -474,9 +533,14 @@ class BundleShow(TestCase, AssertPcsMixin):
                 storage-map source-dir=/tmp/docker1a target-dir=/tmp/docker1b
                 storage-map id=my-storage-map source-dir=/tmp/docker2a
                     target-dir=/tmp/docker2b
+                meta target-role=Stopped is-managed=false
             """
         )
+        self.assert_pcs_success(
+            "resource create A ocf:pacemaker:Dummy bundle B1 --no-default-ops"
+        )
         self.assert_pcs_success("resource show B1", outdent(
+            # pylint:disable=trailing-whitespace
             """\
              Bundle: B1
               Docker: image=pcs:test masters=2 options="a b c" replicas=4
@@ -487,5 +551,8 @@ class BundleShow(TestCase, AssertPcsMixin):
               Storage Mapping:
                source-dir=/tmp/docker1a target-dir=/tmp/docker1b (B1-storage-map)
                source-dir=/tmp/docker2a target-dir=/tmp/docker2b (my-storage-map)
+              Meta Attrs: is-managed=false target-role=Stopped 
+              Resource: A (class=ocf provider=pacemaker type=Dummy)
+               Operations: monitor interval=10 timeout=20 (A-monitor-interval-10)
             """
         ))
diff --git a/pcs/test/cib_resource/test_manage_unmanage.py b/pcs/test/cib_resource/test_manage_unmanage.py
index 5b78646..2a87cd3 100644
--- a/pcs/test/cib_resource/test_manage_unmanage.py
+++ b/pcs/test/cib_resource/test_manage_unmanage.py
@@ -18,6 +18,7 @@ class ManageUnmanage(
     TestCase,
     get_assert_pcs_effect_mixin(
         lambda cib: etree.tostring(
+            # pylint:disable=undefined-variable
             etree.parse(cib).findall(".//resources")[0]
         )
     )
@@ -234,7 +235,7 @@ class ManageUnmanage(
 
         self.assert_pcs_fail(
             "resource unmanage A B",
-            "Error: resource/clone/master/group 'B' does not exist\n"
+            "Error: resource/clone/master/group/bundle 'B' does not exist\n"
         )
         self.assert_resources_xml_in_cib(
             """
@@ -255,7 +256,7 @@ class ManageUnmanage(
 
         self.assert_pcs_fail(
             "resource manage A B",
-            "Error: resource/clone/master/group 'B' does not exist\n"
+            "Error: resource/clone/master/group/bundle 'B' does not exist\n"
         )
         self.assert_resources_xml_in_cib(
             """
diff --git a/pcs/test/test_resource.py b/pcs/test/test_resource.py
index 96eae8f..4bdc194 100644
--- a/pcs/test/test_resource.py
+++ b/pcs/test/test_resource.py
@@ -8,6 +8,7 @@ from __future__ import (
 from lxml import etree
 import re
 import shutil
+from textwrap import dedent
 
 from pcs.test.tools import pcs_unittest as unittest
 from pcs.test.tools.assertions import AssertPcsMixin
@@ -3321,11 +3322,11 @@ Error: Cannot remove more than one resource from cloned group
 
         # bad resource name
         o,r = pcs(temp_cib, "resource enable NoExist")
-        ac(o,"Error: resource/clone/master/group 'NoExist' does not exist\n")
+        ac(o,"Error: resource/clone/master/group/bundle 'NoExist' does not exist\n")
         assert r == 1
 
         o,r = pcs(temp_cib, "resource disable NoExist")
-        ac(o,"Error: resource/clone/master/group 'NoExist' does not exist\n")
+        ac(o,"Error: resource/clone/master/group/bundle 'NoExist' does not exist\n")
         assert r == 1
 
         # cloned group
@@ -3829,7 +3830,7 @@ Error: Cannot remove more than one resource from cloned group
 
         self.assert_pcs_fail_regardless_of_force(
             "resource enable dummy3 dummyX",
-            "Error: resource/clone/master/group 'dummyX' does not exist\n"
+            "Error: resource/clone/master/group/bundle 'dummyX' does not exist\n"
         )
         self.assert_pcs_success(
             "resource show --full",
@@ -3849,7 +3850,7 @@ Error: Cannot remove more than one resource from cloned group
 
         self.assert_pcs_fail_regardless_of_force(
             "resource disable dummy1 dummyX",
-            "Error: resource/clone/master/group 'dummyX' does not exist\n"
+            "Error: resource/clone/master/group/bundle 'dummyX' does not exist\n"
         )
         self.assert_pcs_success(
             "resource show --full",
@@ -4719,7 +4720,11 @@ class BundleCommon(
 class BundleDeleteTest(BundleCommon):
     def test_without_primitive(self):
         self.fixture_bundle("B")
-        self.assert_effect("resource delete B", "<resources/>")
+        self.assert_effect(
+            "resource delete B",
+            "<resources/>",
+            "Deleting bundle 'B'\n"
+        )
 
     def test_with_primitive(self):
         self.fixture_bundle("B")
@@ -4727,7 +4732,10 @@ class BundleDeleteTest(BundleCommon):
         self.assert_effect(
             "resource delete B",
             "<resources/>",
-            "Deleting Resource - R\n",
+            dedent("""\
+                Deleting bundle 'B' and its inner resource 'R'
+                Deleting Resource - R
+            """),
         )
 
     def test_remove_primitive(self):
@@ -4823,30 +4831,26 @@ class BundleCloneMaster(BundleCommon):
 class BundleMiscCommands(BundleCommon):
     def test_resource_enable_bundle(self):
         self.fixture_bundle("B")
-        self.assert_pcs_fail_regardless_of_force(
-            "resource enable B",
-            "Error: 'B' is not clone/master/a group/primitive\n"
+        self.assert_pcs_success(
+            "resource enable B"
         )
 
     def test_resource_disable_bundle(self):
         self.fixture_bundle("B")
-        self.assert_pcs_fail_regardless_of_force(
-            "resource disable B",
-            "Error: 'B' is not clone/master/a group/primitive\n"
+        self.assert_pcs_success(
+            "resource disable B"
         )
 
     def test_resource_manage_bundle(self):
         self.fixture_bundle("B")
-        self.assert_pcs_fail_regardless_of_force(
-            "resource manage B",
-            "Error: 'B' is not clone/master/a group/primitive\n"
+        self.assert_pcs_success(
+            "resource manage B"
         )
 
     def test_resource_unmanage_bundle(self):
         self.fixture_bundle("B")
-        self.assert_pcs_fail_regardless_of_force(
-            "resource unmanage B",
-            "Error: 'B' is not clone/master/a group/primitive\n"
+        self.assert_pcs_success(
+            "resource unmanage B"
         )
 
     def test_op_add(self):
diff --git a/pcs/usage.py b/pcs/usage.py
index d2262a6..75cb118 100644
--- a/pcs/usage.py
+++ b/pcs/usage.py
@@ -430,10 +430,12 @@ Commands:
 
     bundle create <bundle id> [container [<container type>] <container options>]
             [network <network options>] [port-map <port options>]...
-            [storage-map <storage options>]... [--wait[=n]]
+            [storage-map <storage options>]... [meta <meta options>]
+            [--disabled] [--wait[=n]]
         Create a new bundle encapsulating no resources. The bundle can be used
         either as it is or a resource may be put into it at any time.
         If the container type is not specified, it defaults to 'docker'.
+        If --disabled is specified, the bundle is not started automatically.
         If --wait is specified, pcs will wait up to 'n' seconds for the bundle
         to start and then return 0 on success or 1 on error. If 'n' is not
         specified it defaults to 60 minutes.
@@ -442,13 +444,14 @@ Commands:
             [network <network options>]
             [port-map (add <port options>) | (remove <id>...)]...
             [storage-map (add <storage options>) | (remove <id>...)]...
+            [meta <meta options>]
             [--wait[=n]]
         Add, remove or change options to specified bundle. If you wish to update
         a resource encapsulated in the bundle, use the 'pcs resource update'
-        command instead and specify the resource id.  If --wait is specified,
+        command instead and specify the resource id. If --wait is specified,
         pcs will wait up to 'n' seconds for the operation to finish (including
         moving resources if appropriate) and then return 0 on success or 1 on
-        error.  If 'n' is not specified it defaults to 60 minutes.
+        error. If 'n' is not specified it defaults to 60 minutes.
 
     manage <resource id>... [--monitor]
         Set resources listed to managed mode (default). If --monitor is
-- 
1.8.3.1