// connect builds up the ceph conn and default pool
func (d *cephRBDVolumeDriver) connect() {
	log.Println("INFO: connecting to Ceph and default pool context")
	d.m.Lock()
	defer d.m.Unlock()

	// create reusable go-ceph Client Connection
	var cephConn *rados.Conn
	var err error
	if d.cluster == "" {
		cephConn, err = rados.NewConnWithUser(d.user)
	} else {
		// FIXME: TODO: can't seem to use a cluster name -- get error -22 from go-ceph/rados:
		// panic: Unable to create ceph connection to cluster=ceph with user=admin: rados: ret=-22
		cephConn, err = rados.NewConnWithClusterAndUser(d.cluster, d.user)
	}
	if err != nil {
		log.Panicf("ERROR: Unable to create ceph connection to cluster=%s with user=%s: %s", d.cluster, d.user, err)
	}

	// read ceph.conf and setup connection
	if d.config == "" {
		err = cephConn.ReadDefaultConfigFile()
	} else {
		err = cephConn.ReadConfigFile(d.config)
	}
	if err != nil {
		log.Panicf("ERROR: Unable to read ceph config: %s", err)
	}

	err = cephConn.Connect()
	if err != nil {
		log.Panicf("ERROR: Unable to connect to Ceph: %s", err)
	}

	// can now set conn in driver
	d.conn = cephConn

	// setup the default context (pool most likely to be used)
	defaultContext, err := d.openContext(d.defaultPool)
	if err != nil {
		log.Panicf("ERROR: Unable to connect to default Ceph Pool: %s", err)
	}
	d.defaultIoctx = defaultContext
}
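// openContext is a minimal sketch of the helper that connect() calls; the
// actual implementation is not shown above, so this assumes the driver keeps
// the established *rados.Conn in d.conn and wraps go-ceph's
// Conn.OpenIOContext with logging. Adjust to match the real helper.
func (d *cephRBDVolumeDriver) openContext(pool string) (*rados.IOContext, error) {
	// OpenIOContext fails if the pool does not exist or the connection is
	// not established.
	ioctx, err := d.conn.OpenIOContext(pool)
	if err != nil {
		log.Printf("ERROR: Unable to open context for pool %s: %s", pool, err)
		return nil, err
	}
	return ioctx, nil
}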
// TestNewConnWithClusterAndUser probes the -22 (EINVAL) failure noted in the
// FIXME above: rados.NewConnWithClusterAndUser appears to require the fully
// qualified user name ("client.admin") rather than the short form ("admin").
func TestNewConnWithClusterAndUser(t *testing.T) {
	_, err := rados.NewConnWithClusterAndUser("ceph", "client.admin")
	assert.NoError(t, err)
}
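// For completeness, the test above needs roughly the following imports; the
// paths shown are the commonly used ones for go-ceph and testify and may
// differ from this repository's vendoring:
//
//	import (
//		"testing"
//
//		"github.com/ceph/go-ceph/rados"
//		"github.com/stretchr/testify/assert"
//	)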