pause-health:
  description: Pause ceph health operations across the entire ceph cluster
resume-health:
  description: Resume ceph health operations across the entire ceph cluster
pause:
  description: |
    CAUTION - Sets the local osd units in the charm to 'out', but does not
    stop the osds. Unless the osd cluster is set to noout (see below), this
    removes them from the ceph cluster and forces ceph to migrate the PGs to
    other OSDs in the cluster. See the following:

    http://docs.ceph.com/docs/master/rados/operations/add-or-rm-osds/#removing-the-osd
    "Do not let your cluster reach its full ratio when removing an OSD.
    Removing OSDs could cause the cluster to reach or exceed its full ratio."
    Also note that for small clusters you may encounter the corner case where
    some PGs remain stuck in the active+remapped state. Refer to the above link
    on how to resolve this.

    The pause-health action can be used before pausing the ceph units to stop
    the cluster rebalancing the data off this unit. pause-health sets 'noout'
    on the cluster such that it will not try to rebalance the data across the
    remaining units.

    It is up to the user of the charm to determine whether pause-health should
    be used as it depends on whether the osd is being paused for maintenance or
    to remove it from the cluster completely.
resume:
  description: |
    Sets the local osd units in the charm to 'in'. Note that the pause action
    does NOT stop the osd processes.
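# A minimal maintenance workflow sketch, assuming a unit named ceph/0.
# 'juju action do' is the Juju 1.x invocation used with trusty charms; on
# Juju 2.x substitute 'juju run-action'.
#
#   juju action do ceph/0 pause-health   # set 'noout' so PGs are not rebalanced
#   juju action do ceph/0 pause          # mark the local osds 'out'
#   # ...perform maintenance on the unit...
#   juju action do ceph/0 resume         # mark the local osds 'in'
#   juju action do ceph/0 resume-health  # clear 'noout'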
create-pool:
  description: Creates a pool
  params:
    name:
      type: string
      description: The name of the pool
    profile-name:
      type: string
      description: The CRUSH profile to use for this pool.  The ruleset must exist first.
    pool-type:
      type: string
      default: "replicated"
      enum: [replicated, erasure]
      description: |
        The pool type, which may either be 'replicated' to recover from lost OSDs by keeping multiple copies of
        the objects, or 'erasure' to get a kind of generalized RAID5 capability.
    replicas:
      type: integer
      default: 3
      description: |
        For the replicated pool this is the number of replicas to store of each object.
    erasure-profile-name:
      type: string
      default: default
      description: |
        The name of the erasure coding profile to use for this pool. Note this profile must exist
        before calling create-pool
  required: [name]
  additionalProperties: false
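# Usage sketch for create-pool, assuming a unit named ceph/0 and an
# illustrative pool name 'data' (Juju 1.x syntax):
#
#   juju action do ceph/0 create-pool name=data pool-type=replicated replicas=3
#   juju action fetch <action-id>   # <action-id> is printed by 'juju action do'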
create-erasure-profile:
  description: Create a new erasure code profile to use on a pool.
  params:
    name:
      type: string
      description: The name of the profile
    failure-domain:
      type: string
      default: host
      enum: [chassis, datacenter, host, osd, pdu, pod, rack, region, room, root, row]
      description: |
        Setting failure-domain=host creates a CRUSH ruleset that ensures no two chunks are stored in the same host.
    plugin:
      type: string
      default: "jerasure"
      enum: [jerasure, isa, lrc, shec]
      description: |
        The erasure plugin to use for this profile.
        See http://docs.ceph.com/docs/master/rados/operations/erasure-code-profile/ for more details
    data-chunks:
      type: integer
      default: 3
      description: |
        The number of data chunks, i.e. the number of chunks in which the original object is divided. For instance,
        if K = 2, a 10KB object will be divided into 2 chunks of 5KB each.
    coding-chunks:
      type: integer
      default: 2
      description: |
        The number of coding chunks, i.e. the number of additional chunks computed by the encoding functions.
        If there are 2 coding chunks, it means 2 OSDs can be out without losing data.
    locality-chunks:
      type: integer
      description: |
        Group the coding and data chunks into sets of size locality. For instance, for k=4 and m=2, when locality=3
        two groups of three are created. Each set can be recovered without reading chunks from another set.
    durability-estimator:
      type: integer
      description: |
        The number of parity chunks each of which includes each data chunk in its calculation range. The number is used
        as a durability estimator. For instance, if c=2, 2 OSDs can be down without losing data.
  required: [name, data-chunks, coding-chunks]
  additionalProperties: false
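# Usage sketch, with a hypothetical profile name 'myprofile'. With k=3 data
# chunks and m=2 coding chunks, each object is stored as 5 chunks and the
# pool can tolerate the loss of 2 OSDs:
#
#   juju action do ceph/0 create-erasure-profile name=myprofile \
#       plugin=jerasure failure-domain=host data-chunks=3 coding-chunks=2
#   juju action do ceph/0 create-pool name=ecpool pool-type=erasure \
#       erasure-profile-name=myprofile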
get-erasure-profile:
  description: Display an erasure code profile.
  params:
    name:
      type: string
      description: The name of the profile
  required: [name]
  additionalProperties: false
delete-erasure-profile:
  description: Deletes an erasure code profile.
  params:
    name:
      type: string
      description: The name of the profile
  required: [name]
  additionalProperties: false
list-erasure-profiles:
  description: List the names of all erasure code profiles
  additionalProperties: false
list-pools:
  description: List your cluster's pools
  additionalProperties: false
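# Inspection sketch; the profile name is illustrative:
#
#   juju action do ceph/0 list-erasure-profiles
#   juju action do ceph/0 get-erasure-profile name=myprofile
#   juju action do ceph/0 list-pools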
set-pool-max-bytes:
  description: Set pool quotas for the maximum number of bytes.
  params:
    max:
      type: integer
      description: The maximum number of bytes to set as the quota for the pool
    pool-name:
      type: string
      description: The name of the pool
  required: [pool-name, max]
  additionalProperties: false
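# Quota sketch: cap the illustrative pool 'data' at 10 GiB
# (10 * 1024^3 = 10737418240 bytes):
#
#   juju action do ceph/0 set-pool-max-bytes pool-name=data max=10737418240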
delete-pool:
  description: Deletes the named pool
  params:
    pool-name:
      type: string
      description: The name of the pool
  required: [pool-name]
  additionalProperties: false
rename-pool:
  description: Rename a pool
  params:
    pool-name:
      type: string
      description: The name of the pool
    new-name:
      type: string
      description: The new name of the pool
  required: [pool-name, new-name]
  additionalProperties: false
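# Rename/delete sketch; pool names are illustrative. delete-pool is
# irreversible, so double-check the name first:
#
#   juju action do ceph/0 rename-pool pool-name=data new-name=data-old
#   juju action do ceph/0 delete-pool pool-name=data-old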
pool-statistics:
  description: Show a pool's utilization statistics
  additionalProperties: false
snapshot-pool:
  description: Snapshot a pool
  params:
    pool-name:
      type: string
      description: The name of the pool
    snapshot-name:
      type: string
      description: The name of the snapshot
  required: [snapshot-name, pool-name]
  additionalProperties: false
remove-pool-snapshot:
  description: Remove a pool snapshot
  params:
    pool-name:
      type: string
      description: The name of the pool
    snapshot-name:
      type: string
      description: The name of the snapshot
  required: [snapshot-name, pool-name]
  additionalProperties: false
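# Snapshot sketch; pool and snapshot names are illustrative:
#
#   juju action do ceph/0 snapshot-pool pool-name=data snapshot-name=data-snap1
#   juju action do ceph/0 remove-pool-snapshot pool-name=data snapshot-name=data-snap1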
pool-set:
  description: Set a value for the pool
  params:
    pool-name:
      type: string
      description: The pool to set this variable on.
    key:
      type: string
      description: Any valid Ceph key from http://docs.ceph.com/docs/master/rados/operations/pools/#set-pool-values
    value:
      type: string
      description: The value to set
  required: [key, value, pool-name]
  additionalProperties: false
pool-get:
  description: Get a value for the pool
  params:
    pool-name:
      type: string
      description: The pool to get this variable from.
    key:
      type: string
      description: Any valid Ceph key from http://docs.ceph.com/docs/master/rados/operations/pools/#get-pool-values
  required: [key, pool-name]
  additionalProperties: false
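# Get/set sketch using the 'size' key (replica count) from the Ceph pool
# documentation linked above; the pool name is illustrative:
#
#   juju action do ceph/0 pool-get pool-name=data key=size
#   juju action do ceph/0 pool-set pool-name=data key=size value=3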