// docker.go
package docker

import (
	"context"
	"encoding/json"
	"fmt"
	"goseg/config"
	"goseg/structs"
	"io"
	"log/slog"
	"os"
	"strings"
	"time"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/client"
)

var (
	logger   = slog.New(slog.NewJSONHandler(os.Stdout, nil))
	EventBus = make(chan structs.Event, 100)
)
// return the container status of a slice of ships
func GetShipStatus(patps []string) (map[string]string, error) {
	statuses := make(map[string]string)
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		errmsg := fmt.Sprintf("Error getting Docker info: %v", err)
		logger.Error(errmsg)
		return statuses, err
	}
	defer cli.Close()
	containers, err := cli.ContainerList(context.Background(), types.ContainerListOptions{})
	if err != nil {
		errmsg := fmt.Sprintf("Error getting containers: %v", err)
		logger.Error(errmsg)
		return statuses, err
	}
	for _, pier := range patps {
		found := false
		for _, cont := range containers {
			for _, name := range cont.Names {
				fasPier := "/" + pier
				if name == fasPier {
					statuses[pier] = cont.Status
					found = true
					break
				}
			}
			if found {
				break
			}
		}
		if !found {
			statuses[pier] = "not found"
		}
	}
	return statuses, nil
}
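// Illustrative usage (a sketch, not part of the original file): how a caller
// might report the result of GetShipStatus. The ship names are hypothetical.
func exampleShipStatusReport() {
	statuses, err := GetShipStatus([]string{"sampel-palnet", "zod"})
	if err != nil {
		logger.Error(fmt.Sprintf("Couldn't get ship statuses: %v", err))
		return
	}
	for pier, status := range statuses {
		// status is Docker's human-readable string, e.g. "Up 2 hours", or "not found"
		logger.Info(fmt.Sprintf("%s: %s", pier, status))
	}
}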
// return the name of a container's network
func GetContainerNetwork(name string) (string, error) {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		return "", err
	}
	defer cli.Close()
	containerJSON, err := cli.ContainerInspect(context.Background(), name)
	if err != nil {
		return "", err
	}
	for networkName := range containerJSON.NetworkSettings.Networks {
		return networkName, nil
	}
	return "", fmt.Errorf("container is not attached to any network")
}
// return the disk and memory usage for a container
func GetContainerStats(containerName string) (structs.ContainerStats, error) {
	var res structs.ContainerStats
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		return res, err
	}
	defer cli.Close()
	statsResp, err := cli.ContainerStats(context.Background(), containerName, false)
	if err != nil {
		return res, err
	}
	defer statsResp.Body.Close()
	var stat types.StatsJSON
	if err := json.NewDecoder(statsResp.Body).Decode(&stat); err != nil {
		return res, err
	}
	memUsage := stat.MemoryStats.Usage
	inspectResp, err := cli.ContainerInspect(context.Background(), containerName)
	if err != nil {
		return res, err
	}
	diskUsage := int64(0)
	if inspectResp.SizeRw != nil {
		diskUsage = *inspectResp.SizeRw
	}
	return structs.ContainerStats{
		MemoryUsage: memUsage,
		DiskUsage:   diskUsage,
	}, nil
}
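// Note: cli.ContainerInspect does not ask the daemon to compute sizes, so
// SizeRw is typically nil and DiskUsage above will often come back as 0.
// A possible variant (a sketch, not the original implementation) requests
// sizes explicitly via ContainerInspectWithRaw:
func getContainerDiskUsage(cli *client.Client, containerName string) (int64, error) {
	// the third argument asks the daemon to calculate SizeRw / SizeRootFs
	inspectResp, _, err := cli.ContainerInspectWithRaw(context.Background(), containerName, true)
	if err != nil {
		return 0, err
	}
	if inspectResp.SizeRw != nil {
		return *inspectResp.SizeRw, nil
	}
	return 0, nil
}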
// start a container by name + type
// not for booting new ships
func StartContainer(containerName string, containerType string) (structs.ContainerState, error) {
	var containerState structs.ContainerState
	ctx := context.Background()
	cli, err := client.NewClientWithOpts(client.FromEnv)
	if err != nil {
		return containerState, err
	}
	defer cli.Close()
	// get the desired tag and hash from config
	containerInfo, err := GetLatestContainerInfo(containerType)
	if err != nil {
		return containerState, err
	}
	// check if container exists
	containers, err := cli.ContainerList(ctx, types.ContainerListOptions{All: true})
	if err != nil {
		return containerState, err
	}
	var existingContainer *types.Container
	for i := range containers {
		for _, name := range containers[i].Names {
			if name == "/"+containerName {
				existingContainer = &containers[i]
				break
			}
		}
		if existingContainer != nil {
			break
		}
	}
	desiredTag := containerInfo["tag"]
	desiredHash := containerInfo["hash"]
	desiredRepo := containerInfo["repo"]
	desiredImage := fmt.Sprintf("%s:%s@sha256:%s", desiredRepo, desiredTag, desiredHash)
	desiredStatus := "running"
	if desiredTag == "" || desiredHash == "" {
		err = fmt.Errorf("version info has not been retrieved")
		return containerState, err
	}
	// check if the desired image is available locally
	images, err := cli.ImageList(ctx, types.ImageListOptions{})
	if err != nil {
		return containerState, err
	}
	imageExistsLocally := false
	for _, img := range images {
		for _, tag := range img.RepoTags {
			if tag == desiredRepo+":"+desiredTag && img.ID == desiredHash {
				imageExistsLocally = true
				break
			}
		}
		if imageExistsLocally {
			break
		}
	}
	if !imageExistsLocally {
		// pull the image if it doesn't exist locally
		pullResp, err := cli.ImagePull(ctx, desiredImage, types.ImagePullOptions{})
		if err != nil {
			return containerState, err
		}
		// the pull only completes once the response stream has been read
		io.Copy(io.Discard, pullResp)
		pullResp.Close()
	}
	switch {
	case existingContainer == nil:
		// if the container does not exist, create and start it
		_, err := cli.ContainerCreate(ctx, &container.Config{
			Image: desiredImage,
		}, nil, nil, nil, containerName)
		if err != nil {
			return containerState, err
		}
		err = cli.ContainerStart(ctx, containerName, types.ContainerStartOptions{})
		if err != nil {
			return containerState, err
		}
		msg := fmt.Sprintf("%s started with image %s", containerName, desiredImage)
		logger.Info(msg)
	case existingContainer.State == "exited":
		// if the container exists but is stopped, start it
		err := cli.ContainerStart(ctx, containerName, types.ContainerStartOptions{})
		if err != nil {
			return containerState, err
		}
		msg := fmt.Sprintf("Started stopped container %s", containerName)
		logger.Info(msg)
	default:
		// debug
		currentImage := existingContainer.Image
		tagAndDigest := strings.Split(currentImage, ":")[1] // This will give "latest@sha256"
		currentTag := strings.Split(tagAndDigest, "@")[0]   // This will give "latest"
		logger.Info(fmt.Sprintf("Current image: %s, Current tag: %s, Desired tag: %s, Current ID: %s, Desired ID: %s",
			currentImage, currentTag, desiredTag, existingContainer.ImageID, desiredHash))
		//
		// if container is running, check the image hash
		if existingContainer.ImageID != desiredHash {
			// if the hashes don't match, recreate the container with the new one
			err := cli.ContainerRemove(ctx, containerName, types.ContainerRemoveOptions{Force: true})
			if err != nil {
				return containerState, err
			}
			_, err = cli.ContainerCreate(ctx, &container.Config{
				Image: desiredImage,
			}, nil, nil, nil, containerName)
			if err != nil {
				return containerState, err
			}
			err = cli.ContainerStart(ctx, containerName, types.ContainerStartOptions{})
			if err != nil {
				return containerState, err
			}
			msg := fmt.Sprintf("Restarted %s with image %s", containerName, desiredImage)
			logger.Info(msg)
		} else {
			msg := fmt.Sprintf("%s is already running with the correct tag: %s", containerName, desiredTag)
			logger.Info(msg)
		}
	}
	containerDetails, err := cli.ContainerInspect(ctx, containerName)
	if err != nil {
		return containerState, fmt.Errorf("failed to inspect container %s: %v", containerName, err)
	}
	containerState = structs.ContainerState{
		ID:            containerDetails.ID,           // container id hash
		Name:          containerName,                 // name (eg @p)
		Image:         desiredImage,                  // full repo:tag@hash string
		Type:          containerType,                 // eg `vere` (corresponds with version server label)
		DesiredStatus: desiredStatus,                 // what the user sets
		ActualStatus:  containerDetails.State.Status, // what the daemon reports
		CreatedAt:     containerDetails.Created,      // this is a string
	}
	return containerState, err
}
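// Illustrative call (a sketch, not part of the original file): starting a ship's
// runtime container. The ship name is hypothetical; "vere" matches the container
// type label mentioned in the struct comments above.
func exampleStartVere() {
	state, err := StartContainer("sampel-palnet", "vere")
	if err != nil {
		logger.Error(fmt.Sprintf("Failed to start container: %v", err))
		return
	}
	logger.Info(fmt.Sprintf("%s is %s (image %s)", state.Name, state.ActualStatus, state.Image))
}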
// convert the version info back into json then a map lol
// so we can easily get the correct repo/release channel/tag/hash
func GetLatestContainerInfo(containerType string) (map[string]string, error) {
	var res map[string]string
	arch := config.Architecture
	hashLabel := arch + "_sha256"
	versionInfo := config.VersionInfo
	jsonData, err := json.Marshal(versionInfo)
	if err != nil {
		return res, err
	}
	// Convert JSON to map
	var m map[string]interface{}
	err = json.Unmarshal(jsonData, &m)
	if err != nil {
		return res, err
	}
	containerData, ok := m[containerType].(map[string]interface{})
	if !ok {
		return nil, fmt.Errorf("%s data is not a map", containerType)
	}
	tag, ok := containerData["tag"].(string)
	if !ok {
		return nil, fmt.Errorf("'tag' is not a string")
	}
	hashValue, ok := containerData[hashLabel].(string)
	if !ok {
		return nil, fmt.Errorf("'%s' is not a string", hashLabel)
	}
	repo, ok := containerData["repo"].(string)
	if !ok {
		return nil, fmt.Errorf("'repo' is not a string")
	}
	res = make(map[string]string)
	res["tag"] = tag
	res["hash"] = hashValue
	res["repo"] = repo
	return res, nil
}
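// Sketch of how the returned map is typically consumed (not in the original
// file): it feeds directly into the repo:tag@sha256:hash image reference that
// StartContainer builds. Assumes the version info provides "repo", "tag" and
// "<arch>_sha256" keys for the given container type.
func exampleDesiredImage(containerType string) (string, error) {
	info, err := GetLatestContainerInfo(containerType)
	if err != nil {
		return "", err
	}
	// same format StartContainer assembles before pulling
	return fmt.Sprintf("%s:%s@sha256:%s", info["repo"], info["tag"], info["hash"]), nil
}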
// stop a container with the name
func StopContainerByName(containerName string) error {
	ctx := context.Background()
	cli, err := client.NewClientWithOpts(client.FromEnv)
	if err != nil {
		return err
	}
	defer cli.Close()
	// fetch all containers incl stopped
	containers, err := cli.ContainerList(ctx, types.ContainerListOptions{All: true})
	if err != nil {
		return err
	}
	for _, cont := range containers {
		for _, name := range cont.Names {
			if name == "/"+containerName {
				// Stop the container
				options := container.StopOptions{}
				if err := cli.ContainerStop(ctx, cont.ID, options); err != nil {
					return fmt.Errorf("failed to stop container %s: %v", containerName, err)
				}
				logger.Info(fmt.Sprintf("Successfully stopped container %s", containerName))
				return nil
			}
		}
	}
	return fmt.Errorf("container with name %s not found", containerName)
}
// subscribe to docker events and feed them into eventbus
func DockerListener() {
	ctx := context.Background()
	cli, err := client.NewClientWithOpts(client.FromEnv)
	if err != nil {
		logger.Error(fmt.Sprintf("Error initializing Docker client: %v", err))
		return
	}
	messages, errs := cli.Events(ctx, types.EventsOptions{})
	for {
		select {
		case event := <-messages:
			// Convert the Docker event to our custom event and send it to the EventBus
			EventBus <- structs.Event{Type: event.Action, Data: event}
		case err := <-errs:
			logger.Error(fmt.Sprintf("Docker event error: %v", err))
		}
	}
}
// periodically poll docker in case we miss something
func DockerPoller() {
	ticker := time.NewTicker(10 * time.Second)
	for {
		select {
		case <-ticker.C:
			logger.Info("polling docker")
			// fetch the status of all containers and compare with app's state
			// if there's a change, send an event to the EventBus
			return
		}
	}
}
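// DockerPoller above is a stub: it logs once and returns after the first tick,
// with the comparison left as comments. A minimal sketch of that comparison,
// assuming the caller tracks the previously observed statuses (patps and
// lastKnown are hypothetical inputs) and that structs.Event.Data accepts
// arbitrary values:
func pollShipsOnce(patps []string, lastKnown map[string]string) map[string]string {
	statuses, err := GetShipStatus(patps)
	if err != nil {
		logger.Error(fmt.Sprintf("Docker poll failed: %v", err))
		return lastKnown
	}
	for pier, status := range statuses {
		if lastKnown[pier] != status {
			// surface the change on the same bus DockerListener feeds
			EventBus <- structs.Event{Type: "poll", Data: map[string]string{pier: status}}
		}
	}
	return statuses
}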