// Package docker manages the lifecycle of goseg's Docker containers:
// status queries, stats, start/stop, and daemon event monitoring.
  1. package docker
import (
	"context"
	"encoding/json"
	"fmt"
	"io"
	"log/slog"
	"os"
	"strings"
	"time"

	"goseg/config"
	"goseg/structs"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/client"
)
var (
	// logger emits structured JSON log records to stdout.
	logger = slog.New(slog.NewJSONHandler(os.Stdout, nil))
	// EventBus carries Docker daemon events to the rest of the app.
	// Buffered (100) so the listener goroutine doesn't block on a
	// momentarily slow consumer.
	EventBus = make(chan structs.Event, 100)
)
  20. // return the container status of a slice of ships
  21. func GetShipStatus(patps []string) (map[string]string, error) {
  22. statuses := make(map[string]string)
  23. cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
  24. if err != nil {
  25. errmsg := fmt.Sprintf("Error getting Docker info: %v", err)
  26. logger.Error(errmsg)
  27. return statuses, err
  28. } else {
  29. containers, err := cli.ContainerList(context.Background(), types.ContainerListOptions{})
  30. if err != nil {
  31. errmsg := fmt.Sprintf("Error getting containers: %v", err)
  32. logger.Error(errmsg)
  33. return statuses, err
  34. } else {
  35. for _, pier := range patps {
  36. found := false
  37. for _, container := range containers {
  38. for _, name := range container.Names {
  39. fasPier := "/" + pier
  40. if name == fasPier {
  41. statuses[pier] = container.Status
  42. found = true
  43. break
  44. }
  45. }
  46. if found {
  47. break
  48. }
  49. }
  50. if !found {
  51. statuses[pier] = "not found"
  52. }
  53. }
  54. }
  55. return statuses, nil
  56. }
  57. }
  58. // return the name of a container's network
  59. func GetContainerNetwork(name string) (string, error) {
  60. cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
  61. if err != nil {
  62. return "", err
  63. }
  64. defer cli.Close()
  65. containerJSON, err := cli.ContainerInspect(context.Background(), name)
  66. if err != nil {
  67. return "", err
  68. }
  69. for networkName := range containerJSON.NetworkSettings.Networks {
  70. return networkName, nil
  71. }
  72. return "", fmt.Errorf("container is not attached to any network")
  73. }
  74. // return the disk and memory usage for a container
  75. func GetContainerStats(containerName string) (structs.ContainerStats, error) {
  76. var res structs.ContainerStats
  77. cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
  78. if err != nil {
  79. return res, err
  80. }
  81. defer cli.Close()
  82. statsResp, err := cli.ContainerStats(context.Background(), containerName, false)
  83. if err != nil {
  84. return res, err
  85. }
  86. defer statsResp.Body.Close()
  87. var stat types.StatsJSON
  88. if err := json.NewDecoder(statsResp.Body).Decode(&stat); err != nil {
  89. return res, err
  90. }
  91. memUsage := stat.MemoryStats.Usage
  92. inspectResp, err := cli.ContainerInspect(context.Background(), containerName)
  93. if err != nil {
  94. return res, err
  95. }
  96. diskUsage := int64(0)
  97. if inspectResp.SizeRw != nil {
  98. diskUsage = *inspectResp.SizeRw
  99. }
  100. return structs.ContainerStats{
  101. MemoryUsage: memUsage,
  102. DiskUsage: diskUsage,
  103. }, nil
  104. }
  105. // start a container by name + type
  106. // contructs a container.Config, then runs through whether to boot/restart/etc
  107. // saves the current container state in memory after completion
  108. func StartContainer(containerName string, containerType string) (structs.ContainerState, error) {
  109. var containerState structs.ContainerState
  110. var containerConfig container.Config
  111. // switch on containerType to process containerConfig
  112. switch containerType {
  113. case "vere":
  114. // containerConfig, err := urbitContainerConf(containerName)
  115. _, err := urbitContainerConf(containerName)
  116. if err != nil {
  117. return containerState, err
  118. }
  119. default:
  120. errmsg := fmt.Errorf("Unrecognized container type %s",containerType)
  121. return containerState, errmsg
  122. }
  123. ctx := context.Background()
  124. cli, err := client.NewClientWithOpts(client.FromEnv)
  125. if err != nil {
  126. return containerState, err
  127. }
  128. // get the desired tag and hash from config
  129. containerInfo, err := GetLatestContainerInfo(containerType)
  130. if err != nil {
  131. return containerState, err
  132. }
  133. // check if container exists
  134. containers, err := cli.ContainerList(ctx, types.ContainerListOptions{All: true})
  135. if err != nil {
  136. return containerState, err
  137. }
  138. var existingContainer *types.Container = nil
  139. for _, container := range containers {
  140. for _, name := range container.Names {
  141. if name == "/"+containerName {
  142. existingContainer = &container
  143. break
  144. }
  145. }
  146. if existingContainer != nil {
  147. break
  148. }
  149. }
  150. desiredTag := containerInfo["tag"]
  151. desiredHash := containerInfo["hash"]
  152. desiredRepo := containerInfo["repo"]
  153. desiredImage := fmt.Sprintf("%s:%s@sha256:%s", desiredRepo, desiredTag, desiredHash)
  154. desiredStatus := "running"
  155. // check if the desired image is available locally
  156. images, err := cli.ImageList(ctx, types.ImageListOptions{})
  157. if err != nil {
  158. return containerState, err
  159. }
  160. imageExistsLocally := false
  161. for _, img := range images {
  162. if img.ID == desiredHash {
  163. imageExistsLocally = true
  164. break
  165. }
  166. if imageExistsLocally {
  167. break
  168. }
  169. }
  170. if !imageExistsLocally {
  171. // pull the image if it doesn't exist locally
  172. _, err = cli.ImagePull(ctx, desiredImage, types.ImagePullOptions{})
  173. if err != nil {
  174. return containerState, err
  175. }
  176. }
  177. switch {
  178. case existingContainer == nil:
  179. // if the container does not exist, create and start it
  180. _, err := cli.ContainerCreate(ctx, &containerConfig, nil, nil, nil, containerName)
  181. if err != nil {
  182. return containerState, err
  183. }
  184. err = cli.ContainerStart(ctx, containerName, types.ContainerStartOptions{})
  185. if err != nil {
  186. return containerState, err
  187. }
  188. msg := fmt.Sprintf("%s started with image %s", containerName, desiredImage)
  189. logger.Info(msg)
  190. case existingContainer.State == "exited":
  191. // if the container exists but is stopped, start it
  192. err := cli.ContainerStart(ctx, containerName, types.ContainerStartOptions{})
  193. if err != nil {
  194. return containerState, err
  195. }
  196. msg := fmt.Sprintf("Started stopped container %s", containerName)
  197. logger.Info(msg)
  198. default:
  199. // if container is running, check the image digest
  200. currentImage := existingContainer.Image
  201. digestParts := strings.Split(currentImage, "@sha256:")
  202. currentDigest := ""
  203. if len(digestParts) > 1 {
  204. currentDigest = digestParts[1]
  205. }
  206. if currentDigest != desiredHash {
  207. // if the hashes don't match, recreate the container with the new one
  208. err := cli.ContainerRemove(ctx, containerName, types.ContainerRemoveOptions{Force: true})
  209. if err != nil {
  210. return containerState, err
  211. }
  212. _, err = cli.ContainerCreate(ctx, &container.Config{
  213. Image: desiredImage,
  214. }, nil, nil, nil, containerName)
  215. if err != nil {
  216. return containerState, err
  217. }
  218. err = cli.ContainerStart(ctx, containerName, types.ContainerStartOptions{})
  219. if err != nil {
  220. return containerState, err
  221. }
  222. msg := fmt.Sprintf("Restarted %s with image %s", containerName, desiredImage)
  223. logger.Info(msg)
  224. }
  225. }
  226. containerDetails, err := cli.ContainerInspect(ctx, containerName)
  227. if err != nil {
  228. return containerState, fmt.Errorf("failed to inspect container %s: %v", containerName, err)
  229. }
  230. // save the current state of the container in memory for reference
  231. containerState = structs.ContainerState{
  232. ID: containerDetails.ID, // container id hash
  233. Name: containerName, // name (eg @p)
  234. Image: desiredImage, // full repo:tag@hash string
  235. Type: containerType, // eg `vere` (corresponds with version server label)
  236. DesiredStatus: desiredStatus, // what the user sets
  237. ActualStatus: containerDetails.State.Status, // what the daemon reports
  238. CreatedAt: containerDetails.Created, // this is a string
  239. Config: containerConfig, // container.Config struct constructed above
  240. }
  241. return containerState, err
  242. }
  243. // convert the version info back into json then a map lol
  244. // so we can easily get the correct repo/release channel/tag/hash
  245. func GetLatestContainerInfo(containerType string) (map[string]string, error) {
  246. var res map[string]string
  247. arch := config.Architecture
  248. hashLabel := arch + "_sha256"
  249. versionInfo := config.VersionInfo
  250. jsonData, err := json.Marshal(versionInfo)
  251. if err != nil {
  252. return res, err
  253. }
  254. // Convert JSON to map
  255. var m map[string]interface{}
  256. err = json.Unmarshal(jsonData, &m)
  257. if err != nil {
  258. return res, err
  259. }
  260. containerData, ok := m[containerType].(map[string]interface{})
  261. if !ok {
  262. return nil, fmt.Errorf("%s data is not a map", containerType)
  263. }
  264. tag, ok := containerData["tag"].(string)
  265. if !ok {
  266. return nil, fmt.Errorf("'tag' is not a string")
  267. }
  268. hashValue, ok := containerData[hashLabel].(string)
  269. if !ok {
  270. return nil, fmt.Errorf("'%s' is not a string", hashLabel)
  271. }
  272. repo, ok := containerData["repo"].(string)
  273. if !ok {
  274. return nil, fmt.Errorf("'repo' is not a string")
  275. }
  276. res = make(map[string]string)
  277. res["tag"] = tag
  278. res["hash"] = hashValue
  279. res["repo"] = repo
  280. return res, nil
  281. }
  282. // stop a container with the name
  283. func StopContainerByName(containerName string) error {
  284. ctx := context.Background()
  285. cli, err := client.NewClientWithOpts(client.FromEnv)
  286. if err != nil {
  287. return err
  288. }
  289. // fetch all containers incl stopped
  290. containers, err := cli.ContainerList(ctx, types.ContainerListOptions{All: true})
  291. if err != nil {
  292. return err
  293. }
  294. for _, cont := range containers {
  295. for _, name := range cont.Names {
  296. if name == "/"+containerName {
  297. // Stop the container
  298. options := container.StopOptions{}
  299. if err := cli.ContainerStop(ctx, cont.ID, options); err != nil {
  300. return fmt.Errorf("failed to stop container %s: %v", containerName, err)
  301. }
  302. logger.Info(fmt.Sprintf("Successfully stopped container %s\n", containerName))
  303. return nil
  304. }
  305. }
  306. }
  307. return fmt.Errorf("container with name %s not found", containerName)
  308. }
  309. // subscribe to docker events and feed them into eventbus
  310. func DockerListener() {
  311. ctx := context.Background()
  312. cli, err := client.NewClientWithOpts(client.FromEnv)
  313. if err != nil {
  314. logger.Error(fmt.Sprintf("Error initializing Docker client: %v", err))
  315. return
  316. }
  317. messages, errs := cli.Events(ctx, types.EventsOptions{})
  318. for {
  319. select {
  320. case event := <-messages:
  321. // Convert the Docker event to our custom event and send it to the EventBus
  322. EventBus <- structs.Event{Type: event.Action, Data: event}
  323. case err := <-errs:
  324. logger.Error(fmt.Sprintf("Docker event error: %v", err))
  325. }
  326. }
  327. }
  328. // periodically poll docker in case we miss something
  329. func DockerPoller() {
  330. ticker := time.NewTicker(10 * time.Second)
  331. for {
  332. select {
  333. case <-ticker.C:
  334. logger.Info("polling docker")
  335. // todo (maybe not necessary?)
  336. // fetch the status of all containers and compare with app's state
  337. // if there's a change, send an event to the EventBus
  338. return
  339. }
  340. }
  341. }