[{"data":1,"prerenderedAt":1312},["ShallowReactive",2],{"/en-us/blog/tags/aws/":3,"navigation-en-us":20,"banner-en-us":438,"footer-en-us":453,"AWS-tag-page-en-us":664},{"_path":4,"_dir":5,"_draft":6,"_partial":6,"_locale":7,"content":8,"config":11,"_id":13,"_type":14,"title":15,"_source":16,"_file":17,"_stem":18,"_extension":19},"/en-us/blog/tags/aws","tags",false,"",{"tag":9,"tagSlug":10},"AWS","aws",{"template":12},"BlogTag","content:en-us:blog:tags:aws.yml","yaml","Aws","content","en-us/blog/tags/aws.yml","en-us/blog/tags/aws","yml",{"_path":21,"_dir":22,"_draft":6,"_partial":6,"_locale":7,"data":23,"_id":434,"_type":14,"title":435,"_source":16,"_file":436,"_stem":437,"_extension":19},"/shared/en-us/main-navigation","en-us",{"logo":24,"freeTrial":29,"sales":34,"login":39,"items":44,"search":375,"minimal":406,"duo":425},{"config":25},{"href":26,"dataGaName":27,"dataGaLocation":28},"/","gitlab logo","header",{"text":30,"config":31},"Get free trial",{"href":32,"dataGaName":33,"dataGaLocation":28},"https://gitlab.com/-/trial_registrations/new?glm_source=about.gitlab.com&glm_content=default-saas-trial/","free trial",{"text":35,"config":36},"Talk to sales",{"href":37,"dataGaName":38,"dataGaLocation":28},"/sales/","sales",{"text":40,"config":41},"Sign in",{"href":42,"dataGaName":43,"dataGaLocation":28},"https://gitlab.com/users/sign_in/","sign in",[45,89,185,190,296,356],{"text":46,"config":47,"cards":49,"footer":72},"Platform",{"dataNavLevelOne":48},"platform",[50,56,64],{"title":46,"description":51,"link":52},"The most comprehensive AI-powered DevSecOps Platform",{"text":53,"config":54},"Explore our Platform",{"href":55,"dataGaName":48,"dataGaLocation":28},"/platform/",{"title":57,"description":58,"link":59},"GitLab Duo (AI)","Build software faster with AI at every stage of development",{"text":60,"config":61},"Meet GitLab Duo",{"href":62,"dataGaName":63,"dataGaLocation":28},"/gitlab-duo/","gitlab duo ai",{"title":65,"description":66,"link":67},"Why GitLab","10 reasons why Enterprises choose GitLab",{"text":68,"config":69},"Learn more",{"href":70,"dataGaName":71,"dataGaLocation":28},"/why-gitlab/","why gitlab",{"title":73,"items":74},"Get started with",[75,80,85],{"text":76,"config":77},"Platform Engineering",{"href":78,"dataGaName":79,"dataGaLocation":28},"/solutions/platform-engineering/","platform engineering",{"text":81,"config":82},"Developer Experience",{"href":83,"dataGaName":84,"dataGaLocation":28},"/developer-experience/","Developer experience",{"text":86,"config":87},"MLOps",{"href":88,"dataGaName":86,"dataGaLocation":28},"/topics/devops/the-role-of-ai-in-devops/",{"text":90,"left":91,"config":92,"link":94,"lists":98,"footer":167},"Product",true,{"dataNavLevelOne":93},"solutions",{"text":95,"config":96},"View all Solutions",{"href":97,"dataGaName":93,"dataGaLocation":28},"/solutions/",[99,124,146],{"title":100,"description":101,"link":102,"items":107},"Automation","CI/CD and automation to accelerate deployment",{"config":103},{"icon":104,"href":105,"dataGaName":106,"dataGaLocation":28},"AutomatedCodeAlt","/solutions/delivery-automation/","automated software delivery",[108,112,116,120],{"text":109,"config":110},"CI/CD",{"href":111,"dataGaLocation":28,"dataGaName":109},"/solutions/continuous-integration/",{"text":113,"config":114},"AI-Assisted Development",{"href":62,"dataGaLocation":28,"dataGaName":115},"AI assisted development",{"text":117,"config":118},"Source Code 
Management",{"href":119,"dataGaLocation":28,"dataGaName":117},"/solutions/source-code-management/",{"text":121,"config":122},"Automated Software Delivery",{"href":105,"dataGaLocation":28,"dataGaName":123},"Automated software delivery",{"title":125,"description":126,"link":127,"items":132},"Security","Deliver code faster without compromising security",{"config":128},{"href":129,"dataGaName":130,"dataGaLocation":28,"icon":131},"/solutions/security-compliance/","security and compliance","ShieldCheckLight",[133,136,141],{"text":134,"config":135},"Security & Compliance",{"href":129,"dataGaLocation":28,"dataGaName":134},{"text":137,"config":138},"Software Supply Chain Security",{"href":139,"dataGaLocation":28,"dataGaName":140},"/solutions/supply-chain/","Software supply chain security",{"text":142,"config":143},"Compliance & Governance",{"href":144,"dataGaLocation":28,"dataGaName":145},"/solutions/continuous-software-compliance/","Compliance and governance",{"title":147,"link":148,"items":153},"Measurement",{"config":149},{"icon":150,"href":151,"dataGaName":152,"dataGaLocation":28},"DigitalTransformation","/solutions/visibility-measurement/","visibility and measurement",[154,158,162],{"text":155,"config":156},"Visibility & Measurement",{"href":151,"dataGaLocation":28,"dataGaName":157},"Visibility and Measurement",{"text":159,"config":160},"Value Stream Management",{"href":161,"dataGaLocation":28,"dataGaName":159},"/solutions/value-stream-management/",{"text":163,"config":164},"Analytics & Insights",{"href":165,"dataGaLocation":28,"dataGaName":166},"/solutions/analytics-and-insights/","Analytics and insights",{"title":168,"items":169},"GitLab for",[170,175,180],{"text":171,"config":172},"Enterprise",{"href":173,"dataGaLocation":28,"dataGaName":174},"/enterprise/","enterprise",{"text":176,"config":177},"Small Business",{"href":178,"dataGaLocation":28,"dataGaName":179},"/small-business/","small business",{"text":181,"config":182},"Public Sector",{"href":183,"dataGaLocation":28,"dataGaName":184},"/solutions/public-sector/","public sector",{"text":186,"config":187},"Pricing",{"href":188,"dataGaName":189,"dataGaLocation":28,"dataNavLevelOne":189},"/pricing/","pricing",{"text":191,"config":192,"link":194,"lists":198,"feature":283},"Resources",{"dataNavLevelOne":193},"resources",{"text":195,"config":196},"View all resources",{"href":197,"dataGaName":193,"dataGaLocation":28},"/resources/",[199,232,255],{"title":200,"items":201},"Getting started",[202,207,212,217,222,227],{"text":203,"config":204},"Install",{"href":205,"dataGaName":206,"dataGaLocation":28},"/install/","install",{"text":208,"config":209},"Quick start guides",{"href":210,"dataGaName":211,"dataGaLocation":28},"/get-started/","quick setup checklists",{"text":213,"config":214},"Learn",{"href":215,"dataGaLocation":28,"dataGaName":216},"https://university.gitlab.com/","learn",{"text":218,"config":219},"Product documentation",{"href":220,"dataGaName":221,"dataGaLocation":28},"https://docs.gitlab.com/","product documentation",{"text":223,"config":224},"Best practice videos",{"href":225,"dataGaName":226,"dataGaLocation":28},"/getting-started-videos/","best practice videos",{"text":228,"config":229},"Integrations",{"href":230,"dataGaName":231,"dataGaLocation":28},"/integrations/","integrations",{"title":233,"items":234},"Discover",[235,240,245,250],{"text":236,"config":237},"Customer success stories",{"href":238,"dataGaName":239,"dataGaLocation":28},"/customers/","customer success 
stories",{"text":241,"config":242},"Blog",{"href":243,"dataGaName":244,"dataGaLocation":28},"/blog/","blog",{"text":246,"config":247},"Remote",{"href":248,"dataGaName":249,"dataGaLocation":28},"https://handbook.gitlab.com/handbook/company/culture/all-remote/","remote",{"text":251,"config":252},"TeamOps",{"href":253,"dataGaName":254,"dataGaLocation":28},"/teamops/","teamops",{"title":256,"items":257},"Connect",[258,263,268,273,278],{"text":259,"config":260},"GitLab Services",{"href":261,"dataGaName":262,"dataGaLocation":28},"/services/","services",{"text":264,"config":265},"Community",{"href":266,"dataGaName":267,"dataGaLocation":28},"/community/","community",{"text":269,"config":270},"Forum",{"href":271,"dataGaName":272,"dataGaLocation":28},"https://forum.gitlab.com/","forum",{"text":274,"config":275},"Events",{"href":276,"dataGaName":277,"dataGaLocation":28},"/events/","events",{"text":279,"config":280},"Partners",{"href":281,"dataGaName":282,"dataGaLocation":28},"/partners/","partners",{"backgroundColor":284,"textColor":285,"text":286,"image":287,"link":291},"#2f2a6b","#fff","Insights for the future of software development",{"altText":288,"config":289},"the source promo card",{"src":290},"/images/navigation/the-source-promo-card.svg",{"text":292,"config":293},"Read the latest",{"href":294,"dataGaName":295,"dataGaLocation":28},"/the-source/","the source",{"text":297,"config":298,"lists":300},"Company",{"dataNavLevelOne":299},"company",[301],{"items":302},[303,308,314,316,321,326,331,336,341,346,351],{"text":304,"config":305},"About",{"href":306,"dataGaName":307,"dataGaLocation":28},"/company/","about",{"text":309,"config":310,"footerGa":313},"Jobs",{"href":311,"dataGaName":312,"dataGaLocation":28},"/jobs/","jobs",{"dataGaName":312},{"text":274,"config":315},{"href":276,"dataGaName":277,"dataGaLocation":28},{"text":317,"config":318},"Leadership",{"href":319,"dataGaName":320,"dataGaLocation":28},"/company/team/e-group/","leadership",{"text":322,"config":323},"Team",{"href":324,"dataGaName":325,"dataGaLocation":28},"/company/team/","team",{"text":327,"config":328},"Handbook",{"href":329,"dataGaName":330,"dataGaLocation":28},"https://handbook.gitlab.com/","handbook",{"text":332,"config":333},"Investor relations",{"href":334,"dataGaName":335,"dataGaLocation":28},"https://ir.gitlab.com/","investor relations",{"text":337,"config":338},"Trust Center",{"href":339,"dataGaName":340,"dataGaLocation":28},"/security/","trust center",{"text":342,"config":343},"AI Transparency Center",{"href":344,"dataGaName":345,"dataGaLocation":28},"/ai-transparency-center/","ai transparency center",{"text":347,"config":348},"Newsletter",{"href":349,"dataGaName":350,"dataGaLocation":28},"/company/contact/","newsletter",{"text":352,"config":353},"Press",{"href":354,"dataGaName":355,"dataGaLocation":28},"/press/","press",{"text":357,"config":358,"lists":359},"Contact us",{"dataNavLevelOne":299},[360],{"items":361},[362,365,370],{"text":35,"config":363},{"href":37,"dataGaName":364,"dataGaLocation":28},"talk to sales",{"text":366,"config":367},"Get help",{"href":368,"dataGaName":369,"dataGaLocation":28},"/support/","get help",{"text":371,"config":372},"Customer portal",{"href":373,"dataGaName":374,"dataGaLocation":28},"https://customers.gitlab.com/customers/sign_in/","customer portal",{"close":376,"login":377,"suggestions":384},"Close",{"text":378,"link":379},"To search repositories and projects, login to",{"text":380,"config":381},"gitlab.com",{"href":42,"dataGaName":382,"dataGaLocation":383},"search 
login","search",{"text":385,"default":386},"Suggestions",[387,389,393,395,399,403],{"text":57,"config":388},{"href":62,"dataGaName":57,"dataGaLocation":383},{"text":390,"config":391},"Code Suggestions (AI)",{"href":392,"dataGaName":390,"dataGaLocation":383},"/solutions/code-suggestions/",{"text":109,"config":394},{"href":111,"dataGaName":109,"dataGaLocation":383},{"text":396,"config":397},"GitLab on AWS",{"href":398,"dataGaName":396,"dataGaLocation":383},"/partners/technology-partners/aws/",{"text":400,"config":401},"GitLab on Google Cloud",{"href":402,"dataGaName":400,"dataGaLocation":383},"/partners/technology-partners/google-cloud-platform/",{"text":404,"config":405},"Why GitLab?",{"href":70,"dataGaName":404,"dataGaLocation":383},{"freeTrial":407,"mobileIcon":412,"desktopIcon":417,"secondaryButton":420},{"text":408,"config":409},"Start free trial",{"href":410,"dataGaName":33,"dataGaLocation":411},"https://gitlab.com/-/trials/new/","nav",{"altText":413,"config":414},"Gitlab Icon",{"src":415,"dataGaName":416,"dataGaLocation":411},"/images/brand/gitlab-logo-tanuki.svg","gitlab icon",{"altText":413,"config":418},{"src":419,"dataGaName":416,"dataGaLocation":411},"/images/brand/gitlab-logo-type.svg",{"text":421,"config":422},"Get Started",{"href":423,"dataGaName":424,"dataGaLocation":411},"https://gitlab.com/-/trial_registrations/new?glm_source=about.gitlab.com/compare/gitlab-vs-github/","get started",{"freeTrial":426,"mobileIcon":430,"desktopIcon":432},{"text":427,"config":428},"Learn more about GitLab Duo",{"href":62,"dataGaName":429,"dataGaLocation":411},"gitlab duo",{"altText":413,"config":431},{"src":415,"dataGaName":416,"dataGaLocation":411},{"altText":413,"config":433},{"src":419,"dataGaName":416,"dataGaLocation":411},"content:shared:en-us:main-navigation.yml","Main Navigation","shared/en-us/main-navigation.yml","shared/en-us/main-navigation",{"_path":439,"_dir":22,"_draft":6,"_partial":6,"_locale":7,"title":440,"button":441,"image":445,"config":448,"_id":450,"_type":14,"_source":16,"_file":451,"_stem":452,"_extension":19},"/shared/en-us/banner","is now in public beta!",{"text":68,"config":442},{"href":443,"dataGaName":444,"dataGaLocation":28},"/gitlab-duo/agent-platform/","duo banner",{"config":446},{"src":447},"https://res.cloudinary.com/about-gitlab-com/image/upload/v1753720689/somrf9zaunk0xlt7ne4x.svg",{"layout":449},"release","content:shared:en-us:banner.yml","shared/en-us/banner.yml","shared/en-us/banner",{"_path":454,"_dir":22,"_draft":6,"_partial":6,"_locale":7,"data":455,"_id":660,"_type":14,"title":661,"_source":16,"_file":662,"_stem":663,"_extension":19},"/shared/en-us/main-footer",{"text":456,"source":457,"edit":463,"contribute":468,"config":473,"items":478,"minimal":652},"Git is a trademark of Software Freedom Conservancy and our use of 'GitLab' is under license",{"text":458,"config":459},"View page source",{"href":460,"dataGaName":461,"dataGaLocation":462},"https://gitlab.com/gitlab-com/marketing/digital-experience/about-gitlab-com/","page source","footer",{"text":464,"config":465},"Edit this page",{"href":466,"dataGaName":467,"dataGaLocation":462},"https://gitlab.com/gitlab-com/marketing/digital-experience/about-gitlab-com/-/blob/main/content/","web ide",{"text":469,"config":470},"Please contribute",{"href":471,"dataGaName":472,"dataGaLocation":462},"https://gitlab.com/gitlab-com/marketing/digital-experience/about-gitlab-com/-/blob/main/CONTRIBUTING.md/","please 
contribute",{"twitter":474,"facebook":475,"youtube":476,"linkedin":477},"https://twitter.com/gitlab","https://www.facebook.com/gitlab","https://www.youtube.com/channel/UCnMGQ8QHMAnVIsI3xJrihhg","https://www.linkedin.com/company/gitlab-com",[479,502,559,588,622],{"title":46,"links":480,"subMenu":485},[481],{"text":482,"config":483},"DevSecOps platform",{"href":55,"dataGaName":484,"dataGaLocation":462},"devsecops platform",[486],{"title":186,"links":487},[488,492,497],{"text":489,"config":490},"View plans",{"href":188,"dataGaName":491,"dataGaLocation":462},"view plans",{"text":493,"config":494},"Why Premium?",{"href":495,"dataGaName":496,"dataGaLocation":462},"/pricing/premium/","why premium",{"text":498,"config":499},"Why Ultimate?",{"href":500,"dataGaName":501,"dataGaLocation":462},"/pricing/ultimate/","why ultimate",{"title":503,"links":504},"Solutions",[505,510,513,515,520,525,529,532,536,541,543,546,549,554],{"text":506,"config":507},"Digital transformation",{"href":508,"dataGaName":509,"dataGaLocation":462},"/topics/digital-transformation/","digital transformation",{"text":134,"config":511},{"href":129,"dataGaName":512,"dataGaLocation":462},"security & compliance",{"text":123,"config":514},{"href":105,"dataGaName":106,"dataGaLocation":462},{"text":516,"config":517},"Agile development",{"href":518,"dataGaName":519,"dataGaLocation":462},"/solutions/agile-delivery/","agile delivery",{"text":521,"config":522},"Cloud transformation",{"href":523,"dataGaName":524,"dataGaLocation":462},"/topics/cloud-native/","cloud transformation",{"text":526,"config":527},"SCM",{"href":119,"dataGaName":528,"dataGaLocation":462},"source code management",{"text":109,"config":530},{"href":111,"dataGaName":531,"dataGaLocation":462},"continuous integration & delivery",{"text":533,"config":534},"Value stream management",{"href":161,"dataGaName":535,"dataGaLocation":462},"value stream management",{"text":537,"config":538},"GitOps",{"href":539,"dataGaName":540,"dataGaLocation":462},"/solutions/gitops/","gitops",{"text":171,"config":542},{"href":173,"dataGaName":174,"dataGaLocation":462},{"text":544,"config":545},"Small business",{"href":178,"dataGaName":179,"dataGaLocation":462},{"text":547,"config":548},"Public sector",{"href":183,"dataGaName":184,"dataGaLocation":462},{"text":550,"config":551},"Education",{"href":552,"dataGaName":553,"dataGaLocation":462},"/solutions/education/","education",{"text":555,"config":556},"Financial services",{"href":557,"dataGaName":558,"dataGaLocation":462},"/solutions/finance/","financial 
services",{"title":191,"links":560},[561,563,565,567,570,572,574,576,578,580,582,584,586],{"text":203,"config":562},{"href":205,"dataGaName":206,"dataGaLocation":462},{"text":208,"config":564},{"href":210,"dataGaName":211,"dataGaLocation":462},{"text":213,"config":566},{"href":215,"dataGaName":216,"dataGaLocation":462},{"text":218,"config":568},{"href":220,"dataGaName":569,"dataGaLocation":462},"docs",{"text":241,"config":571},{"href":243,"dataGaName":244,"dataGaLocation":462},{"text":236,"config":573},{"href":238,"dataGaName":239,"dataGaLocation":462},{"text":246,"config":575},{"href":248,"dataGaName":249,"dataGaLocation":462},{"text":259,"config":577},{"href":261,"dataGaName":262,"dataGaLocation":462},{"text":251,"config":579},{"href":253,"dataGaName":254,"dataGaLocation":462},{"text":264,"config":581},{"href":266,"dataGaName":267,"dataGaLocation":462},{"text":269,"config":583},{"href":271,"dataGaName":272,"dataGaLocation":462},{"text":274,"config":585},{"href":276,"dataGaName":277,"dataGaLocation":462},{"text":279,"config":587},{"href":281,"dataGaName":282,"dataGaLocation":462},{"title":297,"links":589},[590,592,594,596,598,600,602,606,611,613,615,617],{"text":304,"config":591},{"href":306,"dataGaName":299,"dataGaLocation":462},{"text":309,"config":593},{"href":311,"dataGaName":312,"dataGaLocation":462},{"text":317,"config":595},{"href":319,"dataGaName":320,"dataGaLocation":462},{"text":322,"config":597},{"href":324,"dataGaName":325,"dataGaLocation":462},{"text":327,"config":599},{"href":329,"dataGaName":330,"dataGaLocation":462},{"text":332,"config":601},{"href":334,"dataGaName":335,"dataGaLocation":462},{"text":603,"config":604},"Sustainability",{"href":605,"dataGaName":603,"dataGaLocation":462},"/sustainability/",{"text":607,"config":608},"Diversity, inclusion and belonging (DIB)",{"href":609,"dataGaName":610,"dataGaLocation":462},"/diversity-inclusion-belonging/","Diversity, inclusion and belonging",{"text":337,"config":612},{"href":339,"dataGaName":340,"dataGaLocation":462},{"text":347,"config":614},{"href":349,"dataGaName":350,"dataGaLocation":462},{"text":352,"config":616},{"href":354,"dataGaName":355,"dataGaLocation":462},{"text":618,"config":619},"Modern Slavery Transparency Statement",{"href":620,"dataGaName":621,"dataGaLocation":462},"https://handbook.gitlab.com/handbook/legal/modern-slavery-act-transparency-statement/","modern slavery transparency statement",{"title":623,"links":624},"Contact Us",[625,628,630,632,637,642,647],{"text":626,"config":627},"Contact an expert",{"href":37,"dataGaName":38,"dataGaLocation":462},{"text":366,"config":629},{"href":368,"dataGaName":369,"dataGaLocation":462},{"text":371,"config":631},{"href":373,"dataGaName":374,"dataGaLocation":462},{"text":633,"config":634},"Status",{"href":635,"dataGaName":636,"dataGaLocation":462},"https://status.gitlab.com/","status",{"text":638,"config":639},"Terms of use",{"href":640,"dataGaName":641,"dataGaLocation":462},"/terms/","terms of use",{"text":643,"config":644},"Privacy statement",{"href":645,"dataGaName":646,"dataGaLocation":462},"/privacy/","privacy statement",{"text":648,"config":649},"Cookie preferences",{"dataGaName":650,"dataGaLocation":462,"id":651,"isOneTrustButton":91},"cookie 
preferences","ot-sdk-btn",{"items":653},[654,656,658],{"text":638,"config":655},{"href":640,"dataGaName":641,"dataGaLocation":462},{"text":643,"config":657},{"href":645,"dataGaName":646,"dataGaLocation":462},{"text":648,"config":659},{"dataGaName":650,"dataGaLocation":462,"id":651,"isOneTrustButton":91},"content:shared:en-us:main-footer.yml","Main Footer","shared/en-us/main-footer.yml","shared/en-us/main-footer",{"allPosts":665,"featuredPost":1289,"totalPagesCount":1310,"initialPosts":1311},[666,695,717,740,761,781,804,824,844,864,881,901,923,946,968,987,1008,1027,1046,1064,1083,1103,1124,1144,1164,1182,1205,1224,1246,1267],{"_path":667,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":668,"content":676,"config":688,"_id":691,"_type":14,"title":692,"_source":16,"_file":693,"_stem":694,"_extension":19},"/en-us/blog/accelerate-code-reviews-with-gitlab-duo-and-amazon-q",{"title":669,"description":670,"ogTitle":669,"ogDescription":670,"noIndex":6,"ogImage":671,"ogUrl":672,"ogSiteName":673,"ogType":674,"canonicalUrls":672,"schema":675},"Accelerate code reviews with GitLab Duo and Amazon Q","Use AI-powered agents to optimize code reviews by automatically analyzing merge requests and providing comprehensive feedback on bugs, readability, and coding standards.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750096976/Blog/Hero%20Images/Blog/Hero%20Images/Screenshot%202024-11-27%20at%204.55.28%E2%80%AFPM_4VVz6DgGBOvbGY8BUmd068_1750096975734.png","https://about.gitlab.com/blog/accelerate-code-reviews-with-gitlab-duo-and-amazon-q","https://about.gitlab.com","article","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Accelerate code reviews with GitLab Duo and Amazon Q\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Cesar Saavedra\"}],\n        \"datePublished\": \"2025-06-02\",\n      }",{"title":669,"description":670,"authors":677,"heroImage":671,"date":679,"body":680,"category":681,"tags":682},[678],"Cesar Saavedra","2025-06-02","Code reviews are critical for catching bugs, improving code readability, and maintaining coding standards, but they can also be a major bottleneck in your workflow. When you're trying to ship features quickly, waiting for multiple team members to review your code can be frustrating. The back-and-forth discussions, the scheduling conflicts, and the time it takes to get everyone aligned can stretch what should be a simple review into days or even weeks.\n\nHere's where [GitLab Duo with Amazon Q](https://about.gitlab.com/blog/gitlab-duo-with-amazon-q-agentic-ai-optimized-for-aws/), our new offering that delivers agentic AI throughout the software development lifecycle for AWS customers, comes in to transform your review process. This intelligent, AI-powered solution can perform comprehensive code reviews for you in a fraction of the time it would take your human colleagues. By leveraging advanced agentic AI capabilities, GitLab Duo with Amazon Q streamlines your entire review workflow without sacrificing the quality and thoroughness you need. Think of it as having an always-available, highly skilled reviewer who can instantly analyze your code and provide actionable feedback.\n\n## How it works: Launching a code review\n\nSo how does GitLab Duo with Amazon Q actually work? Let's say you've just finished working on a feature and created a merge request with multiple code updates. 
Once you've entered the command, Amazon Q Service immediately begins analyzing your code changes. You'll see a confirmation that the review is underway, and within moments, the AI is examining every line of your updates, checking for potential issues across multiple dimensions.

When the review completes, you receive comprehensive feedback that covers all the bases: bug detection, readability improvements, syntax errors, and adherence to your team's coding standards. The AI doesn't just point out problems, it provides context and suggestions for fixing them, making it easy for you to understand what needs attention and why.

The beauty of this agentic AI approach is that it handles the heavy lifting of code review while you focus on what matters most: building great software. You get the benefits of thorough code reviews — better bug detection, consistent coding standards, and improved code quality — without the time sink. Your deployment times shrink dramatically because you're no longer waiting in review queues, and your entire team becomes more productive.

## Why use GitLab Duo with Amazon Q?

GitLab Duo with Amazon Q transforms your development workflow in the following ways:

- Lightning-fast code reviews that don't compromise on quality
- Consistent application of coding standards across your entire codebase
- Immediate feedback that helps you fix issues before they reach production
- Reduced deployment times that let you ship features faster
- More time for your team to focus on creative problem-solving instead of repetitive reviews

Ready to see this game-changing feature in action? Watch how GitLab Duo with Amazon Q can revolutionize your code review process:

<!-- blank line -->
<figure class="video_container">
  <iframe src="https://www.youtube.com/embed/4gFIgyFc02Q?si=GXVz--AIrWiwzf-I" frameborder="0" allowfullscreen="true"> </iframe>
</figure>
<!-- blank line -->

> To learn more about GitLab Duo with Amazon Q visit us at an upcoming [AWS Summit in a city near you](https://about.gitlab.com/events/aws-summits/) or [reach out to your GitLab representative](https://about.gitlab.com/partners/technology-partners/aws/#form).
>
> And make sure to join the GitLab 18 virtual launch event to learn about our agentic AI plans and more. [Register today!](https://about.gitlab.com/eighteen/)
*Category: ai-ml · Tags: AI/ML, DevSecOps platform, code review, product, features, partners, AWS, tutorial*

---

# [GitLab is now an Amazon Linux 2 Service Ready Partner](https://about.gitlab.com/blog/amazon-linux-2-service-ready-partner)

*By Darwin Sanoy · 2022-09-21*

Being an Amazon Linux 2 Service Ready partner shows GitLab's strong commitment to AWS Linux distributions.

Several months ago, we shared that GitLab started officially supporting Amazon Linux 2 as well as providing packages for GitLab and GitLab Runner for x86 and Graviton ARM architectures.

GitLab’s hardworking Enablement Engineering team has taken this commitment to the next level by acquiring Amazon’s Service Ready Partner designation for Amazon Linux 2.

The AWS Service Ready program requires GitLab to provide specific evidence of support, compatibility testing, and security testing so that customers can deploy GitLab on Amazon Linux 2 with confidence.

Here is GitLab’s [Amazon Linux 2 Service Ready Partner listing](https://aws.amazon.com/amazon-linux-2/partners/?partner-solutions-cards.sort-by=item.additionalFields.partnerNameLower&partner-solutions-cards.sort-order=asc&awsf.partner-solutions-filter-partner-type=*all&partner-solutions-cards.q=GitLab&partner-solutions-cards.q_operator=AND).

## Amazon Linux 2 support in GitLab 15.0

Amazon Linux 2 is supported in GitLab 15.0 and later. An [earlier blog](/blog/amazon-linux-2-support-and-distro-specific-packages/) discusses a variety of important points and provides some code in order to plan a smooth transition.

The Service Ready designation was received for version 15.3, but no changes were made to the process from 15.0 to support the designation.

GitLab Runner has had ARM64 binaries since 12.6.0 and now has Amazon Linux 2 RPM packages for those wanting package-based installs.

## Inside the distribution team process for distribution support

It would be easy to think that adding support for additional Linux distros is a simple and easy process - but there is actually a lot of effort that goes into it.
GitLab’s Distribution Team uses GitLab itself to apply full DevOps disciplines to the continuous building, testing and distribution of packaging for Amazon Linux. Here are just some of the steps in preparing a GitLab release for packaging:

- Create an elastic scaling distro-specific CI build environment.
- Create a distro-specific CI test environment.
- 2380 compatibility tests are performed on the GitLab code base.
- SAST and dependency security scanning are completed and a specific escalation procedure is applied for any vulnerabilities that are found.
- Primary distributions such as distro-specific .deb and .rpm packages are prepared specifically for each distribution.
- Secondary distributions are done as well - this is when the official GitLab AMI is created.
- CI builds and testing generally happen multiple times a week for Amazon Linux.

![Amazon Linux 2 Test Results](https://about.gitlab.com/images/blogimages/2022-09-amazonlinux/al2testsubgroups.png)

![Amazon Linux 2 Test Results](https://about.gitlab.com/images/blogimages/2022-09-amazonlinux/al2tests.png)

## Need-to-know takeaways

- GitLab is now an official Amazon Linux 2 Service Ready Partner.
- Amazon Linux 2 RPM packages are available for GitLab from version 15.0 and for GitLab Runner.

> **Note**
> This blog post and linked pages contain information related to upcoming products, features, and functionality. It is important to note that the information presented is for informational purposes only. Please do not rely on this information for purchasing or planning purposes. As with all projects, the items mentioned in this blog post and linked pages are subject to change or delay. The development, release, and timing of any products, features, or functionality remain at the sole discretion of GitLab Inc.

![AWS Partner Logo](https://about.gitlab.com/images/blogimages/2022-09-amazonlinux/amazonlinuxandgravitonready.png){: .right}

*Category: engineering · Tags: AWS, partners, DevOps*

---

# [Amazon Linux 2 support and distro-specific packages for GitLab](https://about.gitlab.com/blog/amazon-linux-2-support-and-distro-specific-packages)

*By Darwin Sanoy · 2022-05-02*

Learn how to do early testing as well as how to peg your automation to the EL 7 packages until you are able to properly integrate the changes into your automation.
}",{"title":720,"description":721,"authors":726,"heroImage":722,"date":727,"body":728,"category":729,"tags":730},[705],"2022-05-02","\n\nGitLab’s Distribution Engineering team has been hard at work getting Amazon Linux 2 distro-specific packages ready in preparation for GitLab’s official support of Amazon Linux 2. Starting with Version 15.0 of GitLab, Amazon Linux 2 is a supported distro and packages are available for both x86 and Graviton ARM architectures.\n\n## What is Amazon Linux 2?\n\nAmazon Linux 2 is the next-generation Amazon Linux operating system that provides a modern application environment with the most recent enhancements from the Linux community alongside long-term support. Amazon Linux 2 is accessible as a virtual machine image for on-premises development and testing. This lets you easily develop, test, and certify your applications right from your local development environment. \n\nAccording to the AWS FAQ page for Amazon Linux 2, the primary elements of this latest version of the operating system include:\n\n1. A Linux kernel tuned for performance on Amazon EC2.\n\n2. A set of core packages including systemd, GCC 7.3, Glibc 2.26, Binutils 2.29.1 that receive Long Term Support (LTS) from [AWS](/blog/deploy-aws/).\n\n3. An extras channel for rapidly evolving technologies that are likely to be updated frequently and outside the Long Term Support (LTS) model.\n\nAmazon Linux 2 has a support lifespan through June 20, 2024, to allow enough time for users to migrate to Amazon Linux 2022.\n\n\n## Safely moving forward to Amazon Linux 2 packages from EL7\n\nWhile Amazon Linux 2 has not been officially supported before 15.0, as a convenience to customers who wanted to use yum and RPM packages to install the EL7 packages, GitLab configured a workaround in our packaging services to direct Amazon Linux 2 yum requests to the EL7 packages. If you’ve been using GitLab’s yum repo registration script, you many not know that you were using EL7 packages and not distro-specific packages.\n\nThis workaround will be deprecated and requests from Amazon Linux 2 will get the distro-specific packages with the release of GitLab 15.3.0 on August 22, 2022.\n\nAs a convenience for those of you who have automation that depends directly on this workaround, we wanted to provide you with information on how to do early testing as well as how to peg your automation to the EL 7 packages until you are able to properly integrate the changes into your automation.\n\nGitLab documentation demonstrates how to call our managed yum repository setup scripts by downloading the latest copy and running it directly in [the instructions for installing instances](https://about.gitlab.com/install/#centos-7) and [the instructions for installing runners](https://docs.gitlab.com/runner/install/linux-repository.html).\n\nAny organization using GitLab’s EL 7 packages for Amazon Linux 2 will want to test with - and update to - the distro-specific packages as soon as possible as GitLab will only be testing Amazon Linux 2 with the Amazon Linux 2 specific packages going forward.\n\nWe also understand that the timing of the testing and migration to these packages must be done in a coordinated cutover so that the package type does not change in your existing stacks without you having made any changes. This can be more important if a GitLab stack has undergone platform qualification for compliance purposes.\n\nAmazon Linux 2 specific packages are only available for GitLab 14.9.0 and later. 
If your automation depends directly on GitLab’s repo configuration script and it is still pegged to a GitLab version prior to 14.9.0 when this change becomes GA, then action must be taken to prevent breaking that automation. We have devised an idempotent two-line script solution that you can put in place now to prevent disruption if you are still on a pre-14.9.0 version at the time the new behavior of `script.rpm.sh` becomes GA on August 22, 2022 with the release of GitLab 15.3.0.

GitLab rake-based backup and restore will continue to work seamlessly across the distro-specific package changes if you have to restore to your Amazon Linux 2 built stack from an EL7 backup. If you are using third-party backup, you may wish to trigger a new backup immediately after transitioning to the new distro packages to avoid the scenario altogether.

## Amazon Linux 2 packages for building GitLab instances before 15.3.0

The following code inserts two lines of code between those originally outlined in [the instructions for installing using RPM packages](/install/#centos-7). The first one (starts with `sed`) splices the Amazon Linux 2 yum repo endpoint edits into the repository configuration file created by script.rpm.sh. The second one (starts with `if yum`) cleans the yum cache if the package was already installed so that the new location will be used.

> Sudo note: If you are using these commands interactively under the default SSH or SSM session manager user, then using `sudo su` before running this code is necessary. If you are using these commands in Infrastructure as Code (e.g. CloudFormation userdata scripts), then sudo may cause ‘command not found’ errors when the user running automation is already root equivalent. Be mindful about using interactively tested commands directly in your automation.

```bash
#Existing packaging script from https://about.gitlab.com/install/#centos-7
curl https://packages.gitlab.com/install/repositories/gitlab/gitlab-ee/script.rpm.sh | sudo bash

#Patch to preview and/or peg Amazon Linux 2 specific packages
sed -i "s/\/el\/7/\/amazon\/2/g" /etc/yum.repos.d/gitlab_gitlab*.repo

#Reset the cache if the package was previously installed (not needed for installs onto a clean machine)
if yum list installed gitlab-ee; then yum clean all ; yum makecache; fi

#Existing install command (remove "-y" to validate package and arch mapping before install)
yum install gitlab-ee -y
```

> Notice in this output that the **Version** ends in `.amazon2`. In this case the **Arch** is `aarch64` - indicating 64-bit Graviton ARM.

![Resolved GitLab Dependencies](https://about.gitlab.com/images/blogimages/2022-04-amazon-linux-2/gl-instance-dependencies-resolved.png)

### Moving to Amazon Linux 2 packages early for a seamless post-GA transition

When the script.rpm.sh script is cut over to always point Amazon Linux 2 to the new distro-specific packages, the sed command will no longer be necessary. However, sed is also idempotent and will not make edits if the search text is not found. This means you can use the sed command to switch over early, but not have to worry about a breaking change when the `script.rpm.sh` is updated.
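Because everything here hinges on which endpoint the repo definition points at, it can help to check the current state before and after running the patch. A minimal sketch, assuming the default repo file names created by `script.rpm.sh`:

```bash
# Show which package endpoint each GitLab repo definition currently uses
# (expect ".../el/7/..." before the patch and ".../amazon/2/..." after it)
grep -H "baseurl" /etc/yum.repos.d/gitlab_gitlab*.repo /etc/yum.repos.d/runner_gitlab*.repo 2>/dev/null
```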
### Pegging EL 7 and/or a GitLab version prior to 14.9.0 for a seamless post-GA transition

If your automation is pegged to an earlier version of GitLab, you will need to keep using EL7 packages, and, in fact, after the cutover you would need to implement the opposite command (which is also idempotent and can be implemented now).

```bash
#Patch to peg GitLab Version to EL 7 Packages (only does something after GA of gitlab repo script)
sed -i "s/\/amazon\/2/\/el\/7/g" /etc/yum.repos.d/gitlab_gitlab*.repo

#Reset the cache if the package was previously installed (not needed for installs onto a clean machine)
if yum list installed gitlab-ee; then yum clean all ; yum makecache; fi
```

Just like the sed command for taking distro-specific packages early, this command can be implemented immediately with no bad effects - and it will seamlessly keep your automation pegged to the EL 7 packages when `script.rpm.sh` is updated.

## Amazon Linux 2 package for building GitLab Runners before 15.3.0

The following code inserts two lines of code between those originally [outlined in the instructions](https://docs.gitlab.com/runner/install/linux-repository.html). The first one (starts with `sed`) splices the Amazon Linux 2 yum repo endpoint edits into the repository configuration file created by script.rpm.sh. The second one (starts with `if yum`) cleans the yum cache if the package was already installed so that the new location will be used.

> Sudo note: If you are using these commands interactively under the default SSH or SSM session manager user, then using `sudo su` before running this code is necessary. If you are using these commands in Infrastructure as Code (e.g. CloudFormation userdata scripts), then sudo may cause ‘command not found’ errors when the user running automation is already root equivalent. Be mindful about using interactively tested commands directly in your automation.

```bash
#Existing packaging script from https://docs.gitlab.com/runner/install/linux-repository.html
curl -L "https://packages.gitlab.com/install/repositories/runner/gitlab-runner/script.rpm.sh" | sudo bash

#Patch to test or peg Amazon Linux 2 specific packages
sed -i "s/\/el\/7/\/amazon\/2/g" /etc/yum.repos.d/runner_gitlab*.repo

#Reset the cache if the package was previously installed (not needed for installs onto a clean machine)
if yum list installed gitlab-runner; then yum clean all ; yum makecache; fi

#Existing install command (remove "-y" to validate package and arch mapping before install)
yum install gitlab-runner -y
```

> Notice in this output that **Version** is not distro-specific. In this case the **Arch** is `aarch64` - indicating 64-bit Graviton ARM.

![Resolved GitLab Runner Dependencies](https://about.gitlab.com/images/blogimages/2022-04-amazon-linux-2/gl-runner-dependencies-resolved.png)

## Pegging to EL 7 and/or a GitLab Runner version prior to 14.9.1 for a seamless post-GA transition

The underlying package for EL 7 and Amazon Linux 2 is literally a copy of the same package. However, Runner RPM packages have only been uploaded to the Amazon Linux 2 endpoint for GitLab Runner 14.9.1 and later, so if you have runners that need to be on an earlier version, you would need to stay pointed at EL 7 for those packages to continue to resolve as available.
The following code shows how to do that for GitLab Runner.

```bash
#Patch to peg GitLab Version to EL 7 Packages (only does something after GA of gitlab repo script)
sed -i "s/\/amazon\/2/\/el\/7/g" /etc/yum.repos.d/runner_gitlab*.repo

#Reset the cache if the package was previously installed (not needed for installs onto a clean machine)
if yum list installed gitlab-runner; then yum clean all ; yum makecache; fi
```

## Need-to-know takeaways

- Amazon Linux 2 is a supported distro for GitLab instances and runners as of the release of version 15.0 on May 22, 2022.
- Amazon Linux 2 packages are available for x86 and ARM for GitLab version 14.9.0 and higher. (Prior to 14.9.0 the EL7 packages must be used as they have a long version history.)
- This is the first availability of ARM RPM packages of GitLab for Amazon Linux 2.
- In 15.3 (August 22, 2022), script.rpm.sh will automatically start directing to the Amazon Linux 2 packages where it had previously directed Amazon Linux 2 yum requests to the EL7 packages.
- It is common to have taken a dependency directly on the latest version of this GitLab script in other automation.
- Before the GA cutover date of August 22, 2022 (the 15.3.0 GitLab release) for these scripts, you have the opportunity to pre-test these packages and determine whether they create any issues with your automation or GitLab configuration.
- You can also peg to the Amazon Linux 2 packages early or peg to the EL7 packages in advance if you find problems that you need more time to resolve. Both of these pegging types are idempotent, meaning the code changes do not cause problems after the changeover happens.
- Existing Amazon Linux 2 installations that were installed using the EL7 packages can use a regular yum upgrade command to start using the new Amazon Linux 2 packages (a sketch follows this list). This operation may also be an upgrade of the product version at the same time. For existing installations you will need to patch the yum repo files as explained in this article in order to upgrade directly to Amazon Linux 2 from EL7 using packages.
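The last takeaway can be made concrete with a short sketch. It simply combines the repo patch shown earlier with a normal upgrade, and assumes an existing EL7-based `gitlab-ee` install on an Amazon Linux 2 host:

```bash
# Point the existing repo definition at the Amazon Linux 2 endpoint (no-op if already patched)
sed -i "s/\/el\/7/\/amazon\/2/g" /etc/yum.repos.d/gitlab_gitlab*.repo

# Refresh yum metadata and move to the distro-specific build
yum clean all && yum makecache
yum upgrade gitlab-ee -y

# Verify: per the screenshots above, the installed package version should now end in .amazon2
rpm -q gitlab-ee
```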
> **Note**
> This blog post and linked pages contain information related to upcoming products, features, and functionality. It is important to note that the information presented is for informational purposes only. Please do not rely on this information for purchasing or planning purposes. As with all projects, the items mentioned in this blog post and linked pages are subject to change or delay. The development, release, and timing of any products, features, or functionality remain at the sole discretion of GitLab Inc.

![AWS Partner Logo](https://about.gitlab.com/images/blogimages/2022-04-amazon-linux-2/awsgravitonready.png){: .right}

*Category: news · Tags: releases, CI, CD, tutorial, AWS*

---

# [Automating container image migration from Amazon ECR to GitLab](https://about.gitlab.com/blog/automating-container-image-migration-from-amazon-ecr-to-gitlab)

*By Tim Rizzi · 2025-02-13*

When platform teams move their CI/CD to GitLab, migrating container images shouldn't be the bottleneck. Follow this step-by-step guide to automate the pipeline migration process.

"We need to migrate hundreds of container images from Amazon Elastic Container Registry (ECR) to GitLab. Can you help?" This question kept coming up in conversations with platform engineers. They were modernizing their DevSecOps toolchain with GitLab but got stuck when faced with moving their container images. While each image transfer is simple, the sheer volume makes it daunting.

One platform engineer put it perfectly: "I know exactly what needs to be done – pull, retag, push. But I have 200 microservices, each with multiple tags. I can't justify spending weeks on this migration when I have critical infrastructure work."

## The challenge

That conversation sparked an idea. What if we could automate the entire process? When platform teams move their [CI/CD](https://about.gitlab.com/topics/ci-cd/) to GitLab, migrating container images shouldn't be the bottleneck. The manual process is straightforward but repetitive – pull each image, retag it, and push it to GitLab's Container Registry. Multiply this by dozens of repositories and multiple tags per image, and you're looking at days or weeks of tedious work.

## The solution

We set out to create a GitLab pipeline that would automatically do all this heavy lifting. The goal was simple: Give platform engineers a tool they could set up in minutes and let run overnight, waking up to find all their images migrated successfully.

### Setting up access
First things first – security. We wanted to ensure teams could run this migration with minimal AWS permissions. Here's the read-only identity and access management (IAM) policy you'll need:

```json
{
    "Version": "2012-10-17",
    "Statement": [
        {
            "Effect": "Allow",
            "Action": [
                "ecr:GetAuthorizationToken",
                "ecr:BatchCheckLayerAvailability",
                "ecr:GetDownloadUrlForLayer",
                "ecr:DescribeRepositories",
                "ecr:ListImages",
                "ecr:DescribeImages",
                "ecr:BatchGetImage"
            ],
            "Resource": "*"
        }
    ]
}
```

### GitLab configuration

With security handled, the next step is setting up GitLab. We kept this minimal - you'll need to configure these variables in your CI/CD settings:

```
AWS_ACCOUNT_ID: Your AWS account number
AWS_DEFAULT_REGION: Your ECR region
AWS_ACCESS_KEY_ID: [Masked]
AWS_SECRET_ACCESS_KEY: [Masked]
BULK_MIGRATE: true
```

### The migration pipeline

Now for the interesting part. We built the pipeline using Docker-in-Docker to handle all the image operations reliably:

```yaml
image: docker:20.10
services:
  - docker:20.10-dind

before_script:
  - apk add --no-cache aws-cli jq
  - aws sts get-caller-identity
  # Log in to the source ECR registry and the target GitLab registry
  - aws ecr get-login-password | docker login --username AWS --password-stdin ${AWS_ACCOUNT_ID}.dkr.ecr.${AWS_DEFAULT_REGION}.amazonaws.com
  - docker login -u ${CI_REGISTRY_USER} -p ${CI_REGISTRY_PASSWORD} ${CI_REGISTRY}
```

The pipeline works in three phases, each building on the last:

1. Discovery

First, it finds all your repositories:

```bash
REPOS=$(aws ecr describe-repositories --query 'repositories[*].repositoryName' --output text)
```

2. Tag enumeration

Then, for each repository, it gets all the tags:

```bash
TAGS=$(aws ecr describe-images --repository-name $repo --query 'imageDetails[*].imageTags[]' --output text)
```

3. Transfer

Finally, it handles the actual migration:

```bash
docker pull ${AWS_ACCOUNT_ID}.dkr.ecr.${AWS_DEFAULT_REGION}.amazonaws.com/${repo}:${tag}
docker tag ${AWS_ACCOUNT_ID}.dkr.ecr.${AWS_DEFAULT_REGION}.amazonaws.com/${repo}:${tag} ${CI_REGISTRY_IMAGE}/${repo}:${tag}
docker push ${CI_REGISTRY_IMAGE}/${repo}:${tag}
```
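Putting the three phases together, the heart of the job is a nested loop along the following lines. This is a simplified sketch of the approach described above rather than the exact published pipeline, and it assumes tagged images only and repository names that are valid paths in the GitLab registry:

```bash
# Enumerate every ECR repository, then every tag, and mirror each image into the GitLab registry
REPOS=$(aws ecr describe-repositories --query 'repositories[*].repositoryName' --output text)
for repo in $REPOS; do
  TAGS=$(aws ecr describe-images --repository-name "$repo" \
           --query 'imageDetails[*].imageTags[]' --output text)
  for tag in $TAGS; do
    src="${AWS_ACCOUNT_ID}.dkr.ecr.${AWS_DEFAULT_REGION}.amazonaws.com/${repo}:${tag}"
    dst="${CI_REGISTRY_IMAGE}/${repo}:${tag}"
    docker pull "$src" && docker tag "$src" "$dst" && docker push "$dst" \
      || echo "FAILED: ${repo}:${tag}"   # log the failure and continue with the next image
  done
done
```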
## What you get

Remember that platform engineer who didn't want to spend weeks on migration? Here's what this solution delivers:

- automated discovery and migration of all repositories and tags
- consistent image naming between ECR and GitLab
- error handling for failed transfers
- clear logging for tracking progress

Instead of writing scripts and babysitting the migration, the platform engineer could focus on more valuable work.

## Usage

Getting started is straightforward:

1. Copy the `.gitlab-ci.yml` to your repository.
2. Configure the AWS and GitLab variables.
3. Set `BULK_MIGRATE` to "true" to start the migration.

## Best practices

Through helping teams with their migrations, we've learned a few things:

- Run during off-peak hours to minimize the impact on your team.
- Keep an eye on the pipeline logs - they'll tell you if anything needs attention.
- Don't decommission ECR until you've verified all images transferred successfully.
- For very large migrations, consider adding rate limiting to avoid overwhelming your network.

We've open-sourced this pipeline in our public GitLab repository because we believe platform engineers should spend time building valuable infrastructure, not copying container images. Feel free to adapt it for your needs or ask questions about implementation.

> #### Get started with this and other package components with our [CI/CD Catalog documentation](https://gitlab.com/explore/catalog/components/package).

*Category: engineering · Tags: CI/CD, AWS, tutorial, DevSecOps platform, product, solutions architecture*

---

# [GitLab achieves the AWS DevSecOps Partner Competency Specialty](https://about.gitlab.com/blog/aws-devsecops-competency-partner)

*By Darwin Sanoy · 2023-09-25*

The AWS DevSecOps Partner Competency Specialty demonstrates that GitLab is instrumental in helping customers implement better security while continuing to innovate.

GitLab recently achieved AWS's DevSecOps Partner Competency designation, a sub-specialty of the [AWS DevOps ISV Partner Competency](https://partners.amazonaws.com/partners/001E0000018YWFfIAO/GitLab,%20Inc) category. GitLab also holds the AWS DevOps ISV Partner Competency designation. AWS's partner qualification program signifies to customers that AWS has vetted GitLab's capabilities and use cases.

> Attending [AWS re:Invent 2023](https://reinvent.awsevents.com/)? Find us at Booth 1152.
According to AWS, solutions in the [DevSecOps category](https://aws.amazon.com/devops/partner-solutions/?blog-posts-cards.sort-by=item.additionalFields.createdDate&blog-posts-cards.sort-order=desc&partner-case-studies-cards.sort-by=item.additionalFields.sortDate&partner-case-studies-cards.sort-order=desc) "make it easy for customers to integrate security across every stage of the development and delivery cycles, providing rapid and contextual feedback to development, security, and ops teams." The designation comprises a [validation checklist](https://apn-checklists.s3.amazonaws.com/competency/devops/technology/CenAm4qx8.html#competencyCategories) and attestation that GitLab's DevSecOps Platform meets AWS’s expectations.

## GitLab's strength in DevSecOps

GitLab's [AI-powered DevSecOps platform](https://about.gitlab.com/gitlab-duo/) helps organizations shift left on vulnerability remediation. At GitLab, shifting left means ensuring developers have a frictionless security defect remediation experience that enables them to immediately handle vulnerabilities in their code.

GitLab's DevSecOps Platform:

- surfaces security findings shortly after they are introduced and while the code is still being worked on
- associates findings directly with those who changed the code
- offers remediation guidance (including on-demand training and automated fixes)
- supports rich, in-context collaboration for vulnerability management

![GitLab + AWS Workflow](https://about.gitlab.com/images/blogimages/aws/devsecops-post/gitlabawsworkflow.png)

![AWS Partner Logo](https://about.gitlab.com/images/blogimages/aws/devopsisvpartner.png){: .right}

*Category: devsecops · Tags: AWS, partners, DevOps*
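As one concrete way to wire the shift-left workflow described above into a project, GitLab's managed security scan templates can be included in a `.gitlab-ci.yml`. This is a minimal sketch rather than anything required by the competency checklist; the template names follow GitLab's documented CI templates, and some scanners depend on your license tier:

```yaml
# Minimal pipeline that layers GitLab's managed security scans onto an existing build
stages:
  - build
  - test

include:
  - template: Security/SAST.gitlab-ci.yml                 # static analysis of application code
  - template: Security/Secret-Detection.gitlab-ci.yml     # leaked credentials in the repository
  - template: Security/Dependency-Scanning.gitlab-ci.yml  # known CVEs in dependencies

build-job:
  stage: build
  script:
    - echo "build the application here"   # placeholder for the project's real build step
```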
Dedicated](https://about.gitlab.com/blog/gitlab-dedicated-available/), our single-tenancy software-as-a-service (SaaS) offering. Dedicated, which addresses the needs of customers with stringent compliance requirements while maintaining speed, efficiency, and security, was developed from the lessons we learned building and using GitLab.com, our multi-tenancy model. Although there is overlap in how we manage both platforms, such as the same service-level monitoring stack, there were significant considerations that sparked the need for new design decisions, including how we approach automation, databases, monitoring, and availability. In this blog, we share some of those decision points and their outcomes.\n\n## GitLab platform options\nBefore we dive into the evolution of GitLab Dedicated, let’s level-set on GitLab’s [portfolio of platform models](https://docs.gitlab.com/ee/subscriptions/choosing_subscription.html#choose-a-subscription):\n- GitLab.com, a.k.a. multi-tenant GitLab SaaS on our pricing page and in our documentation\n- GitLab Dedicated, single-tenant SaaS that satisfies compliance requirements such as data residency, isolation, and private networking\n- GitLab self-managed, in which customers install, administer, and maintain their own GitLab instance\n\nEach method meets the different needs of our wide range of customers and requires a unique approach for how we create, package, and deploy the application.\n\nWhile both GitLab.com and Dedicated are SaaS-based, there are key differences between the two. The multi-tenant GitLab.com is the largest hosted instance of GitLab and services thousands of customers and millions of users. Because the platform's reliability is critical to so many customers and because of the iterative nature of how GitLab.com was built, decisions have been made along the way that are unique to the scale of this specific instance.\n\nIn contrast, GitLab Dedicated is a single-tenant SaaS application that is hosted by GitLab in the customer's region of choice (GitLab.com is hosted in the U.S.). While still providing a GitLab-managed SaaS solution for our customers, Dedicated instances are fully isolated from one another, running on a platform that automates the configuration and provisioning of the instances, along with automating as many of the day-two operations as possible, such as maintenance, monitoring, and optimization.\n\nHere are some examples of how Dedicated has used the blueprint of GitLab.com.\n\n## Improved automated deployments\nGitLab.com is a permanent installation with a great deal of history, having evolved significantly since it was first developed. Originally, it was deployed on a single instance in Amazon AWS, before migrating to Microsoft Azure, where it continued to scale out. From Azure, it migrated to its current cloud, Google Cloud Platform. Since then, many customer workloads have [migrated into Kubernetes](https://about.gitlab.com/blog/year-of-kubernetes/) and are supported by the Google Kubernetes Engine ([GKE](https://cloud.google.com/kubernetes-engine)).\n\nWith GitLab Dedicated, we're building smaller instances that rely on automation, repeatability, and deterministic environments. All customer tenant GitLab instance operations must be 100% automated, including provisioning, upgrades, scaling, configuration changes, and any other routine operations. 
The stack relies heavily on the GitLab Environment Toolkit ([GET](https://gitlab.com/gitlab-org/gitlab-environment-toolkit/-/blob/main/docs/environment_advanced_hybrid.md)) Cloud Native Hybrid, which uses the GitLab Helm charts for stateless workloads (e.g., Rails) and Omnibus for deployments to VMs (e.g., Gitaly). GET helps with the deployments targeting [reference architectures](https://docs.gitlab.com/ee/administration/reference_architectures/) and coordinating the provisioning of cloud resources, including compute instances, Kubernetes clusters, managed Postgres databases and more.\n\nAs much as GET automates, it has a certain amount of required setup, which is acceptable to perform manually for one-off or otherwise long-lived deployments, but in order to scale Dedicated we also had to automate that process, which we did with Terraform. Because this was a greenfield approach, we were able to be particularly careful with privileges. Our current cloud deployment target is AWS, so we developed a detailed identity and access management ([IAM](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html)) policy to grant each stage of deployment only the strictly necessary access. We also use IAM role assumption from trusted workloads in a central AWS account to eliminate the need for explicit credentials.  \n\nDeployments follow this process in order:\n- An account creation job running from a trusted location creates a fresh AWS account in an [AWS Organization](https://docs.aws.amazon.com/organizations/index.html), placing it in the correct Organizational Unit to automatically have a [CloudFormation StackSet](https://docs.aws.amazon.com/organizations/latest/userguide/services-that-can-integrate-cloudformation.html) applied, with ongoing updates handled by AWS when needed. This allows us to operate the entire lifecycle of the tenant account using IAM Role Assumption rather than generating and storing static IAM credentials.\n- Prepare stage sets up a fresh AWS account ready to receive a deployment; the privileges are quite high powered, but still limited to the necessary areas, including creating the next role.  \n- Onboard stage creates some high-level resources and otherwise does the setup that GET requires to be able to run, including creating the roles for the next stages with their own limited privileges.  \n- Provision stage is mostly about running GET Terraform and creating the compute and storage resources onto which GitLab will be deployed, with a few additions for our specific needs.  \n- Configure stage runs to deploy the GitLab application onto the resources created earlier. At its core, this is the GET Ansible stage, but it includes our own Terraform wrapper as well to handle our specific needs.\n\nOnce these stages complete, a fully deployed GitLab instance is ready to go.  \n\nConfiguration changes and GitLab upgrades execute the same set of stages, ensuring everything is still configured correctly and applying any pending changes. In the early days of GitLab Dedicated this was done in GitLab CI/CD pipelines operating on GitLab.com, with the tenant descriptions as JSON files in a repository, which was an effective and simple place to start.  \n\nHowever, this multi-stage deployment is now managed by [Switchboard](https://about.gitlab.com/direction/saas-platforms/switchboard/), a portal we built specifically for GitLab Dedicated. 
Switchboard is a bespoke Rails application, which will be the single source of truth for configuration, accessible by customers to manage customer-facing settings, as well as GitLab Dedicated staff for general management. Switchboard will be responsible for automating regular upgrades, including gradual rollouts across the fleet of Dedicated instances.\n\n## Databases geared towards the needs of single tenancy\nGitLab.com uses self-managed Postgres and Redis. For GitLab Dedicated, we wanted to leverage AWS’s managed services as much as possible. Examples include RDS, Elasticache, and OpenSearch, the AWS Elasticsearch managed service. Some of these services may not always be able to support GitLab.com-scale platforms, but they handle the traffic of a single-tenant instance well and provide reliable failovers and ongoing maintenance with no effort on our part.\n\n## Monitoring aligned with strict compliance needs\nThe observability stack for GitLab Dedicated relies on the expertise we gained from building GitLab.com. The monitoring, logging, and availability infrastructure is all maintained within the customer's AWS account, nothing is shared. We receive low-context alerts from these private systems. They serve as a mechanism to direct us to the customer account so we can review what is going on and triage the underlying issues if needed. This is helpful with regulators and compliance as nothing can leak because it doesn't leave the system.\n\nWhile Dedicated and GitLab.com share much of the same monitoring stack, Dedicated instances have tended to reveal different issues within our application. This is due to GitLab.com being a multi-tenant instance, while GitLab Dedicated instances are single-tenant. \n\nThink of the adage, \"[Your 9s are not my 9s](https://rachelbythebay.com/w/2019/07/15/giant/).\" In a platform at the scale of GitLab.com, a subset of users who encounter an issue in part of the application may be a very small percentage of the overall user base. The small impact relative to the scale of the platform may not create an alert. In a single-tenant instance, however, the same bugs or scaling issues can quickly impact a higher percentage of the overall users of the instance, escalating the issue's importance. Applying our service-level monitoring to single-tenant GitLab instances has benefited GitLab users who had encountered bugs that were overlooked in the volume of GitLab.com usage. When we identify issues in a Dedicated instance, we resolve them within the product.\n\n## High availability for all components\nConsidering the hybrid environment and the level of service that we want to offer to our customers, we have made some minor changes from the [standard reference architecture](https://docs.gitlab.com/ee/administration/reference_architectures/).\n\nOne such change is introducing high availability for all components. For the lower size (i.e., up to 2,000 users), our architecture ships by default with all the components in full redundant mode. Components like RDS and Elasticache will have a replica in a different Availability Zone. 
This is referred to as the primary region and we have to define how it will look in the [Geo replicas](https://docs.gitlab.com/ee/administration/geo/setup/database.html).\n\n## Only on Dedicated\nIn addition to the other changes we made, we also built some features that are only used for GitLab Dedicated:\n- Bring your own key - customers can provide and manage the encryption keys used to encrypt AWS resources such as storage, allowing a customer to revoke access should that ever become necessary. This is not something that can be offered in a multi-tenant system like GitLab.com.\n- Switchboard - as mentioned above, Switchboard was purpose-built for Dedicated. It is a multi-tenant Ruby on Rails application, accessible by GitLab Dedicated customer administrators and GitLab Dedicated team members. Using this interface, customers can change the available application runtime settings, access provided graphs, add additional products, and more. The main Switchboard instance serves as a single source of truth for global configuration and status across multiple cloud providers and regions.\n- PrivateLink networking - allows traffic between tenant AWS accounts and customer accounts without exposing data to the internet. \n- Other network features - including traffic filtering and private hosted zones.\n\nDedicated has been an exciting project and a great learning experience for our team. We were able to apply the knowledge accumulated in building GitLab.com to deliver an important new product for our customers in a very efficient way. You can learn more about GitLab Dedicated by visiting our [Dedicated page](https://about.gitlab.com/dedicated/) or contacting a GitLab sales representative.\n\n_Check out the [first installment in our \"Building GitLab with GitLab\" series](https://about.gitlab.com/blog/building-gitlab-with-gitlab-api-fuzzing-workflow/), which takes you behind the scenes of the development of our web API fuzz testing._\n",[9,482,797,686],"DevSecOps",{"slug":799,"featured":6,"template":690},"building-gitlab-with-gitlabcom-how-gitlab-inspired-dedicated","content:en-us:blog:building-gitlab-with-gitlabcom-how-gitlab-inspired-dedicated.yml","Building Gitlab With Gitlabcom How Gitlab Inspired Dedicated","en-us/blog/building-gitlab-with-gitlabcom-how-gitlab-inspired-dedicated.yml","en-us/blog/building-gitlab-with-gitlabcom-how-gitlab-inspired-dedicated",{"_path":805,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":806,"content":812,"config":818,"_id":820,"_type":14,"title":821,"_source":16,"_file":822,"_stem":823,"_extension":19},"/en-us/blog/dag-manual-fix",{"title":807,"description":808,"ogTitle":807,"ogDescription":808,"noIndex":6,"ogImage":809,"ogUrl":810,"ogSiteName":673,"ogType":674,"canonicalUrls":810,"schema":811},"How to use manual jobs with `needs:` relationships","Are you using manual jobs and needs relationship in your CI/CD pipeline? 
Learn more about the fix that might cause your pipeline to behave differently.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749683170/Blog/Hero%20Images/blog_cover2.png","https://about.gitlab.com/blog/dag-manual-fix","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to use manual jobs with `needs:` relationships\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Dov Hershkovitch\"}],\n        \"datePublished\": \"2021-05-20\",\n      }",{"title":807,"description":808,"authors":813,"heroImage":809,"date":815,"body":816,"category":708,"tags":817},[814],"Dov Hershkovitch","2021-05-20","\n\n## A bug when job `needs` a manual job\n\nIn [13.12 we fixed a bug](https://gitlab.com/gitlab-org/gitlab/-/issues/31264) that might affect the existing behavior of your pipeline. We explain why we had to fix the bug, the possible impact of this change on your pipeline, and the proposed workaround if you would like to revert this behavior.\n\n## Background on a two-job pipeline\n\nIn GitLab CI/CD you can easily configure a job to require manual intervention before it runs. The job gets added to the pipeline, but doesn't run until you click the **play** button on it.\n\nLet's look at a two-job pipeline:\n\n```yaml\nstages:\n  - stage1\n  - stage2\n\njob1:\n  stage: stage1\n  script:\n    - echo \"this is an automatic job\"\n\nmanual_job:\n  stage: stage2\n  script:\n    - echo \"This is a manual job which doesn't start automatically, and the pipeline can complete without it starting.\"\n  when: manual # This setting turns a job into a manual one\n```\n\nThis is how it looks when we look at the pipeline graph:\n\n![image2](https://about.gitlab.com/images/blogimages/11-05-2021-when-job-needs-manual/blog1.png){: .shadow.medium.center.wrap-text}\n\nNotice that the manual job gets skipped, and the pipeline completes successfully even though the manual job did not get triggered. This happens because manual jobs are considered optional, and do not need to run.\n\nInternally, manual jobs have `allow_failure` set to true by default, which means that these skipped manual jobs do not cause a pipeline failure. The YAML code below demonstrates how to write the manual job, which results in the same behavior. The job doesn't automatically start, is skipped, and the pipeline passes.\n\n```yaml\nmanual_job:\n  stage: stage2\n  script:\n    - echo \"This is a manual job which doesn't start automatically, and the pipeline can complete without it starting.\"\n  when: manual\n  allow_failure: true # this line is redundant since manual jobs have this setting by default\n```\n\nYou can set `allow_failure` to true for any job, including both manual and automatic jobs, and then the pipeline does not care if the job runs successfully or not.\n\n### How to expand the configuration with `needs` (DAG)\n\nLast year we introduced the [`needs` keyword which lets you create a Directed Acyclic Graph (DAG) to speed up your pipeline](https://docs.gitlab.com/ee/ci/yaml/#needs). 
The `needs` keyword creates a dependency between two jobs regardless of their stage.\n\nLet's look at this example:\n\n```yaml\nstages:\n  - stage1\n  ....\n  - stage10\n\njob1: # this is the first job that runs in the pipeline\n  stage: stage1\n  script:\n    - echo \"exit 0\"\n.....\n\njob10:\n  needs:  # Defines a \"needs\" relationship with job1\n    - job1\n  stage: stage10\n  script:\n    - echo \"This job runs as soon as job1 completes, even though this job is in stage10.\"\n```\n\nThe `needs` keyword creates a dependency between the two jobs, so `job10` runs as soon as `job1` **finishes running** successfully, regardless of the stage ordering.\n\nSo what happens if a job `needs` a manual job that doesn't start running automatically?\n\nLet's look at the following example:\n\n```yaml\nstages:\n  - build\n  - test\n  - deploy\n\nbuild:\n  stage: build\n  script: exit 0\n\ntest:\n  stage: test\n  when: manual\n  script: exit 0\n\ndeploy:\n  stage: deploy\n  script: echo \"when should this job run?\"\n  needs:\n    - test\n```\n\nBefore 13.12, this type of configuration would cause the pipeline to get stuck. The `deploy` job can only start when the `test` job completes, but the `test` job does not start automatically. The rest of the pipeline stops and waits for someone to run the manual `test` job.\n\n![image3](https://about.gitlab.com/images/blogimages/11-05-2021-when-job-needs-manual/blog2.png){: .shadow.medium.center.wrap-text}\n\nThis behavior is even worse with larger pipelines:\n\n![image4](https://about.gitlab.com/images/blogimages/11-05-2021-when-job-needs-manual/blog3.png){: .shadow.medium.center.wrap-text}\n\nThe example above shows a `needs` relationship between the `post test` job and the `test` job (which is a manual job). As you can see, the pipeline is stuck in a running state and any subsequent jobs will not run.\n\nThis was not the behavior most users expected, so we improved it in 13.12. Now, if there is a `needs` relationship pointing to a manual job, the pipeline doesn't stop by default anymore. The manual job is considered optional by default in all cases now. Any jobs that have a `needs` relationship to manual jobs are now also considered optional and skipped if the manual job isn't triggered. If you start the manual job, the jobs that need it can start after it completes.\n\nNote that if you start the manual job before a later job that has it in a `needs` configuration, the later job will still wait for the manual job to finish running.\n\n## What if I don't want this new behavior?\n\nOne of the reasons we selected this solution is that you can quickly revert this change. If you made use of this inadvertent behavior and configured your pipelines to use it to block on manual jobs, it's easy to return to that previous behavior. All you have to do is override the default `allow_failure` in the manual job with `allow_failure: false`. 
This way the manual job is no longer optional, and the pipeline status will be marked as blocked and wait for you to run the job manually.\n\n```yaml\nstages:\n  - build\n  - test\n  - deploy\n\nbuild:\n  stage: build\n  script: exit 0\n\ntest:\n  stage: test\n  when: manual\n  allow_failure: false  # Set to false to return to the previous behavior.\n  script: exit 0\n\ndeploy:\n  stage: deploy\n  script: exit 0\n  needs:\n    - test\n```\n\nShare any thoughts, comments, or questions, by opening an issue in GitLab and mentioning me (`@dhershkovitch`).\n",[732,9,710],{"slug":819,"featured":6,"template":690},"dag-manual-fix","content:en-us:blog:dag-manual-fix.yml","Dag Manual Fix","en-us/blog/dag-manual-fix.yml","en-us/blog/dag-manual-fix",{"_path":825,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":826,"content":832,"config":838,"_id":840,"_type":14,"title":841,"_source":16,"_file":842,"_stem":843,"_extension":19},"/en-us/blog/devsecops-agentic-ai-now-on-gitlab-self-managed-ultimate-on-aws",{"title":827,"description":828,"ogTitle":827,"ogDescription":828,"noIndex":6,"ogImage":829,"ogUrl":830,"ogSiteName":673,"ogType":674,"canonicalUrls":830,"schema":831},"DevSecOps + Agentic AI: Now on GitLab Self-Managed Ultimate on AWS","Start using AI-powered, DevSecOps-enhanced agents in your AWS GitLab Self-Managed Ultimate instance. Enjoy the benefits of GitLab Duo and Amazon Q in your organization.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749659604/Blog/Hero%20Images/Screenshot_2024-11-27_at_4.55.28_PM.png","https://about.gitlab.com/blog/devsecops-agentic-ai-now-on-gitlab-self-managed-ultimate-on-aws","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"DevSecOps + Agentic AI: Now on GitLab Self-Managed Ultimate on AWS\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Jackie Porter\"}],\n        \"datePublished\": \"2025-01-16\",\n      }",{"title":827,"description":828,"authors":833,"heroImage":829,"date":835,"body":836,"category":681,"tags":837},[834],"Jackie Porter","2025-01-16","We are thrilled to announce the GitLab Duo with Amazon Q offering, previously [shared at AWS 2024 re:Invent](https://about.gitlab.com/blog/gitlab-duo-with-amazon-q-devsecops-meets-agentic-ai/), is now available in Preview (Beta) for GitLab Self-Managed Ultimate users, at no additional cost. This milestone brings AI agentic experiences to organizations that maintain their own GitLab instance.\n\n### What does this mean for GitLab Self-Managed Ultimate customers? \n\nBeginning in our [17.8 release](https://about.gitlab.com/releases/2025/01/16/gitlab-17-8-released/), GitLab Self-Managed Ultimate customers can now take advantage of the GitLab Duo with Amazon Q Preview (Beta) [capabilities](https://docs.gitlab.com/ee/user/duo_amazon_q/). 
There are three key experiences you will be able to access: \n- AI-powered feature development: Use the `/q dev` quick action to transform requirements into merge-ready code.\n- Automated code reviews: Leverage `/q review` for instant, intelligent feedback on code quality and security.\n- Java modernization: Streamline Java application upgrades with `/q transform`.\n\n### Getting started with the Preview (Beta) \n\nTo use these capabilities in your GitLab Self-Managed Ultimate instance:\n\n- Ensure you meet the [prerequisites](https://docs.gitlab.com/ee/user/duo_amazon_q/setup.html#prerequisites), including upgrading to GitLab 17.8, having an Ultimate subscription (no trial access), and hosting the instance on AWS. \n- Enable your GitLab Duo with Amazon Q integration settings.\n- Configure IAM identity and roles in AWS and the GitLab AI gateway.\n- Add the Amazon Q user to the project. \n\nFor more detailed setup information, see our [documentation](https://docs.gitlab.com/ee/user/duo_amazon_q/setup.html). \n\n### Looking ahead\n\nThis Preview release represents our commitment to bringing enterprise-grade AI capabilities to all GitLab Ultimate customers. We're excited to work closely with our customers during this Preview (Beta) period to ensure GitLab Duo with Amazon Q delivers a superior experience.\n\nWe encourage GitLab Self-Managed Ultimate customers to begin exploring these capabilities and provide feedback. Your input will be invaluable in shaping the future of AI-powered development in GitLab.\n\n### Get started today \n\nGitLab Self-Managed Ultimate customers can begin enabling and configuring GitLab Duo with Amazon Q as outlined in our [setup documentation](https://docs.gitlab.com/ee/user/duo_amazon_q/setup.html). To learn more about how the Preview (Beta) release can transform your software development, visit our [website](https://about.gitlab.com/partners/technology-partners/aws/#interest). 
\nStay tuned for regular updates as we continue to enhance and expand the capabilities of GitLab Duo with Amazon Q.\n",[683,482,685,9,729],{"slug":839,"featured":6,"template":690},"devsecops-agentic-ai-now-on-gitlab-self-managed-ultimate-on-aws","content:en-us:blog:devsecops-agentic-ai-now-on-gitlab-self-managed-ultimate-on-aws.yml","Devsecops Agentic Ai Now On Gitlab Self Managed Ultimate On Aws","en-us/blog/devsecops-agentic-ai-now-on-gitlab-self-managed-ultimate-on-aws.yml","en-us/blog/devsecops-agentic-ai-now-on-gitlab-self-managed-ultimate-on-aws",{"_path":845,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":846,"content":852,"config":858,"_id":860,"_type":14,"title":861,"_source":16,"_file":862,"_stem":863,"_extension":19},"/en-us/blog/eks-fargate-runner",{"title":847,"description":848,"ogTitle":847,"ogDescription":848,"noIndex":6,"ogImage":849,"ogUrl":850,"ogSiteName":673,"ogType":674,"canonicalUrls":850,"schema":851},"Setting up GitLab EKS Fargate Runners in just one hour","This detailed tutorial answers the question of how to leverage Amazon's AWS Fargate container technology for GitLab Runners.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663373/Blog/Hero%20Images/jeremy-lapak-CVvFVQ_-oUg-700unsplash.jpg","https://about.gitlab.com/blog/eks-fargate-runner","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Get started with GitLab EKS Fargate Runners in 1 hour and zero code, Iteration 1\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Darwin Sanoy\"}],\n        \"datePublished\": \"2023-05-24\",\n      }",{"title":853,"description":848,"authors":854,"heroImage":849,"date":855,"body":856,"category":708,"tags":857},"Get started with GitLab EKS Fargate Runners in 1 hour and zero code, Iteration 1",[705],"2023-05-24","\nLeveraging Amazon's AWS Fargate container technology for [GitLab Runners](https://docs.gitlab.com/runner/) has been a longstanding ask from our customers. This tutorial gets you up and running with the GitLab EKS Fargate Runner combo in less than an hour.\n\nGitLab has a pattern for this task for [Fargate](https://docs.aws.amazon.com/AmazonECS/latest/userguide/what-is-fargate.html) runners under AWS Elastic Container Service (ECS). The primary challenge with this solution is that AWS ECS itself does not allow for the overriding of what image is used when calling an ECS task. Therefore, each GitLab Runner manager ignores the gitlab-ci.yml `image:` tag and runs on the image preconfigured in the task during deployment of the runner manager. As a result, you'll end up creating runner container images that contain every dependency for all the software built by the runner, or you'll create a lot of runner managers per image — or both.\n\nI have long wondered if Fargate-backed Elastic Kubernetes Service (EKS) could get around this limitation since, by nature, Kubernetes must be able to run any image given to it.\n\n## The approach\n\nNothing takes the joy out of learning faster than a lot of complex setup before being able to get to the point of the exercise. To address this, this tutorial uses four things to dramatically reduce the time and steps required to get from zero to hero.\n\n1. AWS CloudShell to minimize the EKS Admin Tooling setup. This also leaves your local machine environment untouched so that other tooling configurations don't get modified.\n2. 
A project called **AWS CloudShell ”Run From Web” Configuration Scripts** to rapidly add additional tooling to CloudShell. This includes some hacks to get large Terraform templates to work on AWS CloudShell.\n3. EKS Blueprints — specifically, a Terraform example that implements both the [Karpenter autoscaler](https://aws.amazon.com/blogs/aws/introducing-karpenter-an-open-source-high-performance-kubernetes-cluster-autoscaler/) and Fargate, including for the kube-system namespace.\n4. A simple Helm install for GitLab Runner.\n\nAlthough you will be running CLI commands and editing config files, no coding is required in the sense that you won't have to build something complex from scratch and then maintain it yourself.\n\n## The results\n\nIt works! It can run 2 x 200 (max allowed per job) parallel “Hello, World” jobs on AWS Fargate-backed EKS in about 4 minutes, which demonstrates the unlimited scalability. It can also run a simple Auto DevOps pipeline, which proves out the ability to run a bunch of different containers.\n\nThe fact that the entire cluster - including kube-system - is Fargate backed reduces the Kubernetes specific long term SRE work to a much lower value approaching that of ECS Fargate clusters. Later on we discuss that this trade-off has a cost and how it can be reconfigured.\n\n## What makes it possible: Product-managed IaC that is an extensible framework\n\nToolkitting made up of Infrastructure as Code (IaC) is frequently referred to as “templates,” and these templates have a reputation of not aging well because there is no active stewardship of the codebase — they are thought of as a one-and-done effort. However, this term does not reflect reality well when the underlying IaC code is actually being product-managed. You can tell if something is being product-managed by using these markers:\n\n- It has a scope-bounded vision of what it wants to do for the community being served (customer).\n- It has active stewardship that keeps the codebase moving along, even if it is open source.\n- It seeks to incorporate strategic enhancements, a.k.a. new features.\n- Things that are broken are considered bugs and are actively eliminated.\n- There is a cadence of taking underlying version updates and for supporting new versions of the primary things they deploy.\n\nAs an extensible framework, EKS Blueprints:\n\n- Are purposefully architected to be extended by anyone.\n- Already have many extensions built.\n\nWhen implementing using EKS Blueprints and you come upon a new need, it is important to check if EKS Blueprints already handles that consideration - similarly to how you would look for Ruby Gems, NPM Modules or Python PyPI packages before building functionality from scratch.\n\nAll of the above are aspects of how the AWS EKS team is product-managing EKS Blueprints. They deserve a big round of applause because product-managing anything to prevent it from becoming yet another community-maintained shelfware project is a strong commitment that requires tenacity!\n\n## Reproducing the experiment\n\n### 1. Set up AWS CloudShell\n\n> **Note:** If you already have a fully persistent environment setup (like your laptop) with: AWS CLI, kubectl, Terraform, then you can avoid environment rebuilds when AWS CloudShell times out by using that instead.\n\nAWS CloudShell comes with kubectl, Git, and AWS CLI, which are all needed. However, we also need a few other scripts. 
More information about these scripts can be read in [my blog post on AWS CloudShell “Run From Web” Configuration Scripts](https://missionimpossiblecode.io/aws-cloudshell-run-from-web-configuration-scripts).\n\n> **Note:** The steps in this section up through the `git clone` from GitLab step (second clone operation) in the next section can be accomplished by running this: `s=prep-eksblueprint-karpenter.sh ; curl -sSL https://gitlab.com/guided-explorations/aws/aws-cloudshell-configs/-/raw/main/${s} -o /tmp/${s}; chmod +x /tmp/${s}; bash /tmp/${s}` .\n\n1. Use the web console to log in to an AWS account where you have admin permissions.\n2. Switch to the region of your choosing.\n3. In the bottom left of the console, click the “CloudShell” icon.\n4. Copy and paste the following one-liner into the console to install Helm, Terraform, and the Nano text editor:\n   `curl -sSL https://gitlab.com/guided-explorations/aws/aws-cloudshell-configs/-/raw/main/add-all.sh -o $HOME/add-all.sh; chmod +x $HOME/add-all.sh; bash $HOME/add-all.sh`\n5. Since our Terraform template will grow larger than the 1GB limit of space in the $HOME directory, we need a workaround to use the template in one directory, but store the Terraform state in $HOME where it will be kept for as long as 120 days. The following one-liner triggers a script that performs that setup for us, after which we can use the /terraform directory for our template:\n   `curl -sSL https://gitlab.com/guided-explorations/aws/aws-cloudshell-configs/-/raw/main/prep-for-terraform.sh -o $HOME/prep-for-terraform.sh; chmod +x $HOME/prep-for-terraform.sh; bash $HOME/prep-for-terraform.sh`\n\n### 2. Run Terraform EKS Blueprint\n\n> **Note:** If at any time you leave your AWS CloudShell long enough for your session to end, the /terraform directory will be tossed. Simply run the last script above and the first four steps below to make it operable again. This will most likely be necessary when it comes time to tear down the Terraform-created AWS resources.\n>\n> Sometimes your AWS CloudShell credentials may expire with a message like: `Error: Kubernetes cluster unreachable: Get \"<CLUSTER URL>\": getting credentials: exec: executable aws failed with exit code 255`. Simply refresh the entire browser tab where AWS CloudShell is running and you’ll generally have new credentials.\n\n#### Version safety\n\nThis tutorial uses a specific release of the EKS Blueprint project so that you have the known state at the time of publishing. The project version also cascades into the versions of all the many dependent modules. While it may also work with the latest version, the version at the time of writing was Version 4.29.0.\n\nThis tutorial also uses Terraform binary Version 1.4.5.\n\n#### Procedures\n\nIf, while using AWS CloudShell, you experience this error: `Error: configuring Terraform AWS Provider: no valid credential sources for Terraform AWS Provider found`, you will need to refresh your browser to update the cached credentials in the terminal session.\n\nPerform the following commands on the AWS CloudShell session:\n\n1. `git clone https://github.com/aws-ia/terraform-aws-eks-blueprints.git --no-checkout /terraform/terraform-aws-eks-blueprints` \n2. `cd /terraform/terraform-aws-eks-blueprints/`\n3. `git reset --hard tags/v4.29.0` #Version pegging to the code that this article was authored with.\n4. 
`git clone https://gitlab.com/guided-explorations/aws/eks-runner-configs/gitlab-runner-eks-fargate.git /terraform/terraform-aws-eks-blueprints/examples/glrunner`\n\n   **Note:** Like other EKS Blueprints examples, the GitLab EKS Fargate Runner example references EKS Blueprint modules with a relative directory reference. This is why we are cloning it into a subdirectory of the EKS Blueprints project.\n5. `cd /terraform/terraform-aws-eks-blueprints/examples/glrunner`\n6. `terraform init`\n\n   **Important**: If you are using AWS CloudShell and your session times out, the /terraform folder and the installed utilities will be gone. You would have to reproduce the above steps to get the Terraform template in a usable state again. This is most likely to happen when you go to use Terraform to delete the stack after playing with it for some days.\n\n   The next few instructions are from: **https://github.com/aws-ia/terraform-aws-eks-blueprints/blob/main/examples/karpenter/README.md#user-content-deploy**. Note the `-state` switch ensures our state is in persistent storage.\n7. `terraform apply -target module.vpc -state=$HOME/tfstate/runner.tfstate`\n8. `terraform apply -target module.eks -state=$HOME/tfstate/runner.tfstate`\n9. **Note:** If you receive “Error: The configmap ”aws-auth” does not exist”, re-run the same command - it will usually update successfully.\n10. `terraform apply -state=$HOME/tfstate/runner.tfstate`\n\nThe previous command will output a kubeconfig command that needs to be run to ensure subsequent kubectl commands work. Run that command. If you are in AWS CloudShell and did not copy the command, this command should work and map to the correct region:\n    `aws eks update-kubeconfig --region $AWS_DEFAULT_REGION --name \"glrunner\"`\n\nIf everything was done correctly, you will have an EKS cluster named `karpenter` in the CloudShell region web console like this:\n\n![codecountingcilog](https://about.gitlab.com/images/blogimages/eks-fargate-runner/eksclusterinconsole.png)  \n\nAnd the output of this console command `kubectl get pods -A` will look like this:\n\n![codecountingcilog](https://about.gitlab.com/images/blogimages/eks-fargate-runner/cliplaincluster.png)\n\nThe output of this console command `kubectl get nodes -A` will show the Fargate prefix:\n\n![codecountingcilog](https://about.gitlab.com/images/blogimages/eks-fargate-runner/clinodesarefargate.png)\n\n> **Note:** Notice that all the EKS extras (coredns, ebs-cni, and karpenter itself) are also running on Fargate. If you are willing to tolerate some regular Kubernetes nodes, you may be able to save cost by running always-on pods on regular Kubernetes hosts. Since this cluster runs Karpenter, you will not need to manually scale those hosts and EKS makes control plane and node updates easier.\n\n### 3. Install GitLab Runner\n\nThese and other commands are available in the GitLab documentation for [GitLab Runner Helm Chart](https://docs.gitlab.com/runner/install/kubernetes.html#additional-configuration).\n\n1. Create an empty GitLab project.\n2. Retrieve a GitLab Runner Token from the project. Keep in mind that using a project token is the easiest way to ensure your experiment runs only on the EKS Fargate Runner. Using a group token may cause your job to run on other runners already setup at your company. You can follow [“Obtain a token”](https://docs.gitlab.com/runner/register/#requirements) from the documentation if you need to.\n3. Perform the following commands back in the AWS CloudShell session.\n4. 
`nano runnerregistration.yaml`\n5. Paste the following:\n\n   ```yaml\n   gitlabUrl: https://_YOUR_GITLAB_URL_HERE_.com\n   runnerRegistrationToken: _YOUR_GITLAB_RUNNER_TOKEN_HERE_\n   concurrent: 200\n   rbac:\n     create: true\n   runners:\n     tags: eks-fargate\n     runUntagged: true\n     imagePullPolicy: if-not-present\n   envVars:\n     - name: KUBERNETES_POLL_TIMEOUT\n       value: 90  \n   ```\n\n   **Note:** Many more settings are discussed in the documentation for the [Kubernetes Executor](https://docs.gitlab.com/runner/executors/kubernetes.html). \n\n**Hard Lesson:** Using a setting for `concurrent` that is lower than our `parallel` setting in the GitLab job below results in all kinds of failures due to some job pods having to wait for an execution slot. Since it’s Fargate, there is no savings to keeping it lower and no negative impact to making it the complete parallel amount.\n\n6. Replace \\_YOUR_GITLAB_URL_HERE_ with your actual GitLab URL.\n7. Replace \\_YOUR_GITLAB_RUNNER_TOKEN_HERE_ with your actual runner token.\n8. Press CTRL-X to exit and press Y to the save prompt.\n9. `helm repo add gitlab https://charts.gitlab.io`\n10. `helm repo update gitlab`\n11. `helm install --namespace gitlab-runner --create-namespace runner1 -f runnerregistration.yaml gitlab/gitlab-runner`\n12. Wait for a few minutes and check the project’s list of runners for a new one with the tag `eks-fargate`\n\nIn AWS CloudShell the command `kubectl get pods -n gitlab-runner` should produce output similar to this:\n\n![codecountingcilog](https://about.gitlab.com/images/blogimages/eks-fargate-runner/runnerlist.png)\n\nAnd in the GitLab Runner list, it will look similar to this:\n\n![codecountingcilog](https://about.gitlab.com/images/blogimages/eks-fargate-runner/glrunnerlist.png)\n\n### 4. Run a test job\n\nThe simplest way to test GitLab Runner scaling is using the `parallel:` keyword to schedule multiple copies of a job. It can also be used to create a job matrix where not all jobs do the same thing.\n\nOne or more GitLab Runner Helm deployments can live in any namespace, so you have many to many mapping flexibility for how you think of runners and their Kubernetes context.\n\nIn the GitLab project where you created the runner, use the web IDE to create .gitlab-ci.yml and populate it with the following content:\n   ```yaml\n   parallel-fargate-hello-world:\n     image: public.ecr.aws/docker/library/bash\n     stage: build\n     parallel: 200\n     script:\n       - echo \"Hello Fargate World\"\n   ```\n\n**Hard Lesson:** After hitting the Docker hub image pull rate limit, I shifted to the same container in the AWS Public Elastic Container Registry (ECR), which has an [image pull rate limit](https://docs.aws.amazon.com/AmazonECR/latest/public/public-service-quotas.html) of 10 per second for this scenario.\n\nIf the job does not automatically start, use the pipeline page to force it to run.\n\nIf everything is configured correctly, your final pipeline status panel should look something like this:\n\n![codecountingcilog](https://about.gitlab.com/images/blogimages/eks-fargate-runner/completedjobs.png)\n\n### 5. 
Runner scaling experimentation\n\nThese and other commands are available in the GitLab documentation for [GitLab Runner Helm Chart](https://docs.gitlab.com/runner/install/kubernetes.html#additional-configuration).\n\nAdditional runners can be added by re-running the install command with a different name for the runner (if using the same token, you’ll have two runners in the same group or project):\n\n`helm install --namespace gitlab-runner runner2 -f runnerregistration.yaml gitlab/gitlab-runner`\n\nRunning 200 jobs takes just under 2 minutes.\n\n#### 400 parallel jobs\n\nBy setting up a second identical job (with a unique job name), I was able to process 400 total jobs.\n\n**Hard Lesson:** The runner likes to schedule all jobs in a parallel job on the same runner instance. It does not seem to want to split a large job across multiple runners registered in the same project. So in order to get more than 200 jobs to process, I had to have two registered runners set to `concurrent:200` and two separate jobs set to `parallel: 200`.\n\nRunning 400 jobs takes just over 3 minutes.\n\n#### More than 400 parallel jobs\n\nAs I tried to scale higher, jobs started to hang. I tried specifically routing jobs to five runners each capable of 300 parallel jobs. I also tried multiple stages and used a hack of `needs []` to get simultaneous execution of jobs in multiple stages.\n\nI was not successful and there could be a wide variety of reasons why — a riddle for a future iteration.\n\nThis command can be used to update a runner's settings after editing the Helm values file (including the token to move the runner to another context): \n\n`helm upgrade --namespace gitlab-runner -f runnerregistration.yaml runner2 gitlab/gitlab-runner`\n\nI found that when I pushed the limits, I would sometimes end up with hung pods until I understood what needed adjusting. Leaving hung Fargate pods will add up to a lot of cash because the pricing assumes very short execution times. This command helps you terminate job pods without accidentally terminating the runner manager pods:\n\n`kubectl get pods --all-namespaces --no-headers |  awk '{if ($2 ~ \"_YOUR_JOB_POD_PREFACE_*\") print $2}' | xargs kubectl -n _YOUR_RUNNER_NAMESPACE_ delete pod`\n\nDon't forget to replace \\_YOUR_RUNNER_NAMESPACE_ and \\_YOUR_JOB_POD_PREFACE_. “_YOUR_JOB_POD_PREFACE\\_” is the unique preface of ONLY the jobs from a given runner, followed by the wildcard star character => \\*\n\nTo uninstall a runner, use:\n\n`helm delete --namespace gitlab-runner runner1`\n\n#### Testing Auto DevOps to prove `image:` tag is honored\n\nTechnically, testing Auto DevOps to prove the `image:` tag is honored isn’t entirely necessary, since the above job loads the bash container without the container being specified in any of the runner or infrastructure setup. However, I performed this as a litmus test anyway.\n\nFollow these steps:\n\n1. Create a new project by clicking the “+” sign in the top bar of GitLab.\n2. On the next page, select “New Project/Repository”.\n3. Then “Create from template”.\n4. Select “Ruby on Rails” (first choice).\n5. Once the project creation is complete, register an EKS runner to it (or re-register the existing runner to the new project).\n6. In the project, select “Settings (Gear Icon)” => “CI/CD” => Auto DevOps => Default to Auto DevOps pipeline.\n7. Click “Save changes”.\n\nThe Auto DevOps pipeline should run. 
If you don’t have a cluster wired up, it will mainly do security scanning, which is sufficient to prove that arbitrary containers can be used by the Fargate-backed GitLab Runner.\n\n### 6. Solution tuning via extensible platform\n\nEKS Blueprints is not only product-managed, but also an extensible platform or framework. In the spirit of fully leveraging the extensible, product-managed EKS Blueprints project, you will always want to check if Blueprints is already instrumented for your scenario before writing code. Additionally, if you must write code, you can consider contributing it as an EKS Blueprint extension so the community can take on some responsibility for maintaining it.\n\n1. The EKS Blueprints Managed IaC has a dizzying number of tuning parameters and optional extensions. For instance, if you want the full GitLab Runner logs collected to AWS CloudWatch, it is a simple configuration to add the fluentd log agent to push custom logs to CloudWatch.\n2. Using Fargate for always-on containers is a trade-off of compute costs to get rid of Kubernetes node management overhead. This trade-off can be easily reversed in this example by removing the \"kube-system\" from \"fargate_profiles\" - since Karpenter is also installed and configured, the hosts will autoscale for load.\n\n### 7. Teardown\n\nThe next few instructions are from https://github.com/aws-ia/terraform-aws-eks-blueprints/blob/main/examples/karpenter/README.md#user-content-destroy.\n\nIf you are using AWS CloudShell and the /terraform directory no longer exists, perform these steps to re-prepare AWS CloudShell to perform teardown.\n\nIf you are not using AWS CloudShell, skip forward to “Teardown steps”.\n\n1. `curl -sSL https://gitlab.com/guided-explorations/aws/aws-cloudshell-configs/-/raw/main/add-all.sh -o $HOME/add-all.sh; chmod +x $HOME/add-all.sh; bash $HOME/add-all.sh`\n2. `curl -sSL https://gitlab.com/guided-explorations/aws/aws-cloudshell-configs/-/raw/main/prep-for-terraform.sh -o $HOME/prep-for-terraform.sh; chmod +x $HOME/prep-for-terraform.sh; bash $HOME/prep-for-terraform.sh`\n3. `git clone https://github.com/aws-ia/terraform-aws-eks-blueprints.git --no-checkout /terraform/terraform-aws-eks-blueprints` \n4. `cd /terraform/terraform-aws-eks-blueprints/`\n5. `git reset --hard tags/v4.29.0`\n6. `git clone https://gitlab.com/guided-explorations/aws/eks-runner-configs/gitlab-runner-eks-fargate.git /terraform/terraform-aws-eks-blueprints/examples/glrunner`\n\n   > **Note:** The above steps can be accomplished by running this: `s=prep-eksblueprint-karpenter.sh ; curl -sSL https://gitlab.com/guided-explorations/aws/aws-cloudshell-configs/-/raw/main/${s} -o /tmp/${s}; chmod +x /tmp/${s}; bash /tmp/${s}` .\n\n7. `cd /terraform/terraform-aws-eks-blueprints/examples/glrunner`\n8. `terraform init`\n\nFollow these teardown steps:\n\n1. `helm delete --namespace gitlab-runner runner1`\n2. `helm delete --namespace gitlab-runner runner2`\n3. `terraform destroy -target=\"module.eks_blueprints_kubernetes_addons\" -auto-approve -state=$HOME/tfstate/runner.tfstate`\n4. `terraform destroy -target=\"module.eks\" -auto-approve -state=$HOME/tfstate/runner.tfstate`\n5. **Note:** If you receive an error about refreshing cached credentials, simply re-run the command and it will usually update successfully.\n6. 
`terraform destroy -auto-approve -state=$HOME/tfstate/runner.tfstate`\n\n### Iteration _n_: We would love your input\n\nThis blog is \"Iteration 1\" precisely because it has not been production load-tested nor specifically cost-engineered. And obviously a “Hello, World” script is not testing much in the way of real work. I really set out to understand if we could run arbitrary containers in a GitLab Fargate setup (and we can) and then got curious about what parallel job scaling might look like with Fargate (and it looks good). The Kubernetes Runner executor has many, many available customizations and it is likely that scaling a production-loaded implementation on EKS will reveal the need to tune more of these parameters. \n\n#### **Collaborative contribution challenges**\n\nHere are some ideas for further collaborative work on this project:\n\n- To push the limits, create a configuration that can scale to 1000 simultaneous jobs.\n- An aws-logging config map that uploads runner pod logs to AWS CloudWatch.\n- A cluster configuration where runner managers and everything that is not a runner job run on non-Fargate nodes – if and only if it will be cheaper than Fargate running 24 x 7.\n- A Fargate Spot configuration. It’s important that the compute type be noted as a runner tag, and it’s important that the same cluster has non-spot instances, because some jobs should not run on spot compute and the decision whether to do so should be available to the GitLab CI Developer who is creating a pipeline.\n\n#### Other runner scaling initiatives\n\nWhile GitLab is building the Next Runner Auto-scaling Architecture, [Kubernetes refinements are not a part of this architectural initiative](https://docs.gitlab.com/ee/architecture/blueprints/runner_scaling/#proposal).\n\n#### Everyone can contribute\n\nThis tutorial, as well as code for additional examples, will be maintained as open source as a GitLab Alliances Solution, and we’d love to have your contributions as you iterate and discover the configurations necessary for your real-world scenarios. This tutorial is in a group wiki and the code will be in the projects under that group here: [AWS Guided Explorations for EKS Runner Configurations](https://gitlab.com/guided-explorations/aws/eks-runner-configs/gitlab-runner-eks-fargate/-/blob/main/README.md). \n\nPhoto by [Jeremy Lapak](https://unsplash.com/@jeremy_justin?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/s/photos/runner?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[710,754,9],{"slug":859,"featured":6,"template":690},"eks-fargate-runner","content:en-us:blog:eks-fargate-runner.yml","Eks Fargate Runner","en-us/blog/eks-fargate-runner.yml","en-us/blog/eks-fargate-runner",{"_path":865,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":866,"content":869,"config":875,"_id":877,"_type":14,"title":878,"_source":16,"_file":879,"_stem":880,"_extension":19},"/en-us/blog/enhance-application-quality-with-ai-powered-test-generation",{"noIndex":6,"title":867,"description":868},"Enhance application quality with AI-powered test generation","Learn how GitLab Duo with Amazon Q improves the QA process by automatically generating comprehensive unit tests.",{"title":867,"description":868,"authors":870,"heroImage":829,"date":871,"body":872,"category":681,"tags":873},[678],"2025-07-03","You know how critical application quality is to your customers and reputation. 
However, ensuring that quality through comprehensive testing can feel like an uphill battle. You're dealing with time-consuming manual processes, inconsistent test coverage across your team, and those pesky issues that somehow slip through the cracks. It's frustrating when your rating drops because quality assurance becomes a bottleneck rather than a safeguard.\n\nHere's where [GitLab Duo with Amazon Q ](https://about.gitlab.com/blog/gitlab-duo-with-amazon-q-agentic-ai-optimized-for-aws/), which delivers agentic AI throughout the software development lifecycle for AWS customers, can help transform your QA process. This AI-powered capability can automatically generate comprehensive unit tests for your code, dramatically accelerating your quality assurance workflow. Instead of spending hours writing tests manually, you can let AI analyze your code and create tests that ensure optimal coverage and consistent quality across your entire application.\n\n## How GitLab Duo with Amazon Q works\n\nSo how does this work? Let's walk through the process together.\nWhen you're working on a new feature, you start by selecting the Java class you've added to your project through a merge request. You simply navigate to your merge request and click on the \"Changes\" tab to see the new code you've added.\n\nNext, you invoke Amazon Q by entering a quick action command. All you need to do is type `/q test` in the issue comment box. It's that simple – just a forward slash, the letter \"q\", and the word \"test\".\n\nOnce you hit enter, Amazon Q springs into action. It analyzes your selected code, understanding its structure, logic, and purpose. The AI examines your class methods, dependencies, and potential edge cases to determine what tests are needed.\n\nWithin moments, Amazon Q generates comprehensive unit test coverage for your new class. It creates tests that cover not just the happy path, but also edge cases and error conditions you might have overlooked. The generated tests follow your project's existing patterns and conventions, ensuring they integrate seamlessly with your codebase.\n\n## Why use GitLab Duo with Amazon Q?\n\nHere's the bottom line: You started with a critical challenge – maintaining high-quality applications while dealing with time constraints and inconsistent testing practices. GitLab Duo with Amazon Q addresses this by automating the test generation process, ensuring optimal code coverage and consistent testing standards. The result? Issues are detected before deployment, your applications maintain their quality, and you can develop software faster without sacrificing reliability.\n\nKey benefits of this feature:\n\n* Significantly reduces time spent writing unit tests\n* Ensures comprehensive test coverage across your codebase\n* Maintains consistent testing quality across all team members\n* Catches issues before they reach production\n* Accelerates your overall development velocity\n\nReady to see this game-changing feature in action? Watch how GitLab Duo with Amazon Q can transform your quality assurance process:\n\n\u003C!-- blank line -->\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/pxlYJVcHY28?si=MhIz6lnHxc6kFhlL\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## Get started with GitLab Duo with Amazon Q today\n\nWant to learn more about GitLab Duo with Amazon Q? 
Visit the [GitLab and AWS partner page](https://about.gitlab.com/partners/technology-partners/aws/) for detailed information.\n\n## Agentic AI resources\n- [Agentic AI guides and resources](https://about.gitlab.com/blog/agentic-ai-guides-and-resources/)\n- [What is agentic AI?](https://about.gitlab.com/topics/agentic-ai/)\n- [GitLab Duo with Amazon Q: Agentic AI optimized for AWS generally available](https://about.gitlab.com/blog/gitlab-duo-with-amazon-q-agentic-ai-optimized-for-aws/)\n- [GitLab Duo with Amazon Q documentation](https://docs.gitlab.com/user/duo_amazon_q/)",[683,685,874,797,687,9],"testing",{"featured":91,"template":690,"slug":876},"enhance-application-quality-with-ai-powered-test-generation","content:en-us:blog:enhance-application-quality-with-ai-powered-test-generation.yml","Enhance Application Quality With Ai Powered Test Generation","en-us/blog/enhance-application-quality-with-ai-powered-test-generation.yml","en-us/blog/enhance-application-quality-with-ai-powered-test-generation",{"_path":882,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":883,"content":889,"config":895,"_id":897,"_type":14,"title":898,"_source":16,"_file":899,"_stem":900,"_extension":19},"/en-us/blog/environment-friction-cycle",{"title":884,"description":885,"ogTitle":884,"ogDescription":885,"noIndex":6,"ogImage":886,"ogUrl":887,"ogSiteName":673,"ogType":674,"canonicalUrls":887,"schema":888},"How GitLab eliminates value stream friction in dev environments","It is important to have the complete picture of scaled effects in view when designing automation.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682507/Blog/Hero%20Images/sandeep-singh-3KbACriapqQ-unsplash.jpg","https://about.gitlab.com/blog/environment-friction-cycle","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How GitLab can eliminate the massive value stream friction of developer environment provisioning and cleanup\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Darwin Sanoy\"}],\n        \"datePublished\": \"2022-11-17\",\n      }",{"title":890,"description":885,"authors":891,"heroImage":886,"date":892,"body":893,"category":708,"tags":894},"How GitLab can eliminate the massive value stream friction of developer environment provisioning and cleanup",[705],"2022-11-17","\n\nA strong DevOps value stream drives developer empowerment as far left as possible. In GitLab, this is embodied in per-feature branch merge requests that are rich with automated code quality and defect information - including not only findings - but automated remediation capabilities and collaboration. Some defects and code quality issues can only be found by analyzing a running copy of the application, including DAST, IAST, fuzzing and many others. GitLab has built a fully automated, seamless developer environment lifecycle management approach right into the developer experience. In fact, it’s so seamlessly built-in, it can be easy to overlook how critical developer environment lifecycle management is. This article will highlight why and how GitLab adds value using developer environment automation. 
In addition, while GitLab provides out of the box developer environment lifecycle management for Kubernetes, this article demonstrates an approach and a working example of how to extend that capability to other common cloud-based application framework PaaS offerings.\n\n## Provisioning of development environments is generally a negative feedback loop\n\nIn a prior job, I worked on a DevOps transformation team that supported multiple massive shared development environments in AWS. They were accessible to more than 4,000 developers working to build more than 100 SaaS applications and utility stacks. In the journey to the AWS Cloud, each development team took ownership of the automation required to deploy their applications. Since developers were able to self-service, over time this solved the problem of development friction generated by waiting for environments to be provisioned for testing, feature experiments, integration experiments, etc. \n\nHowever, the other half of the problem then ballooned - environment sprawl - with an untold number of environments idling without management and without knowledge of when they could be torn down. Over time the development environment cost became a significant multiple of production costs. The cloud has solved problems with environment provisioning bottlenecks due to hardware acquisition and provisioning, but this can also inadvertently fuel the high costs of unmanaged sprawl. This problem understandably causes organizations to raise administrative barriers to new development environments.\n\nIn many organizations this becomes a vicious cycle - most especially if developer environments are operated by a different team, or worse, on an independent budget. Environment justification friction usually comes quickly after discovering the true cost of the current running environments. Developers then have to justify the need for new environment requests and they have to make the gravest of promises to disband the environment as soon as they are done. Another friction arises when a separate group is tasked with cost controls and environment provisioning and cleanup. This introduces friction in the form of administrative and work queueing delays. Coordination friction also crops up because an accurate understanding of exactly what is needed for an environment can be challenging to convey. When mistakes are made or key information is missing, developers must go back and forth on support requests to get the configuration completely correct.\n\n## Partial automation can worsen the problem\n\nThat’s the first half of the environment lifecycle, but as I mentioned, even if that is fully automated and under the control of developers, the other half of the feedback loop comes into play. When a given development environment has fulfilled its initial justification reason, the team does not want to destroy it because environments are so hard to justify and create. Then the sprawl starts and, of course, the barriers to new environments are raised even higher. This is a classic negative feedback loop.\n\nSystems theory shows us that sometimes there are just a few key factors in stopping or even reversing a negative feedback loop. Lets take this specific problem apart and talk about how GitLab solves for it.\n\n## Treat developer environments as a complete lifecycle\n\nIn the prior example it is evident that by leaving out the last stage of the environment lifecycle - retirement or tear down - we still end up with a negative feedback loop. 
Removing provisioning friction actually makes the problem worse if retirement friction is not also addressed at the same time. Solutions to this problem need to address the entire lifecycle to avoid impacting value stream velocity. Neglecting or avoiding the retirement stage of a lifecycle is a common problem across all types of systems. In contrast, by addressing the entire lifecycle we can transform it from being a negative feedback loop to a managed lifecycle.\n\n## The problems of who and when\n\nBuried inside the insidious friction loop are a couple of key coordination problems we’ll call “Who and When.” Basically, \"Who\" should create environments and \"When\" should they be created to ensure reasonable cost optimization? Then again, _Who_ should clean up environments and _When_ do you know with certainty that the environment is no longer needed? Even with highly collaborative teams working hard together for maximum business value, these questions present a difficulty that frequently results in environments running for a long time before they are used and after they are no longer needed. The knowledge of appropriate timing plays a critical role in gaining control over this source of friction.\n\n## The problem of non-immutable development environments\n\nFriction in environment lifecycle management creates a substantial knock-on problem associated with long-lived environments. Long-lived environments that are updated multiple times for various independent projects start to accumulate configuration rot; they become snowflakes with small changes that are left over from non-implemented experiments, software or configuration removals, and other irrelevant bits and pieces. Immutability is the practice of not doing “in place” updates to a computing element, but rather destroying it and replacing it with a fresh, built-from-scratch element. Docker has made this concept widely accepted and effective in production workloads, but development environments frequently do not have this attribute because they were automated without the design constraint of immutability, so they are updated in place for reuse by various initiatives. If the environment lifecycle is not fully automated, it is impossible to make environments workable on a per-feature branch basis.\n\n## The problem of non-isolated development environments\n\nWhen environments are manually provisioned or when there is a lot of cost or administrative friction to setting them up, environment sharing becomes more commonplace. This creates sharing contention at many levels. Waiting to schedule an environment into use, pressure to complete work quickly so others can use the environment, and restrictions on the types of changes that can be made to shared environments are just some of the common sharing contention elements that arise. If environments can be isolated, then sharing contention friction evaporates. Pushing this to the extreme of per-feature branch granularity brings many benefits, but is also difficult.\n\n## Effect on the development value stream\n\nThe effect that a friction-filled environment lifecycle has on the value stream can be immense - how many stories have you heard of projects waylaid for weeks or months while waiting on environment provisioning? What about defects shipped to production because a shared environment had leftover configuration during testing? Frequently this friction is tolerated in the value stream because no one will argue that unlimited environment sprawl is a wise use of company resources. 
We all turn off the lights in our home when we are no longer using a room, and it is good business sense and good stewardship not to leave idle resources running at work.\n\nThe concept of good stewardship of planetary resources is actually becoming an architectural-level priority in the technology sector. This is evidenced by AWS’ [introduction of the “Sustainability” pillar to the AWS Well-Architected Framework in 2021](https://aws.amazon.com/blogs/aws/sustainability-pillar-well-architected-framework/) and many other green initiatives in the technology sector.\n\nIt’s imperative that efforts to improve the development value stream consider whether developer environment management friction is hampering the breadth, depth and velocity of product management and software development.\n\n## Seamless and fully automated review environment lifecycle management\n\nWhat if this negative feedback loop could be stopped? What if new environments were seamless and automatically created right at the moment they were needed? What if developers were completely happy to immediately tear down an environment when they were done because it takes no justification nor effort on their part to create a new one at will?\n\nEnter GitLab Review Environments!\n\nGitLab review apps are created by the developer action of creating a new branch. No humans are involved as the environment is deployed while the developer is still mulling over their first code changes on their branch.\n\nAs the developer pushes code updates, the review apps are automatically updated with the changes, and all quality checks and security scanning are run so the developer knows whether they introduced a vulnerability or quality defect. This is done within the shortest possible amount of time after the defect was introduced.\n\nWhen the developer merges their code, the review app is automatically torn down.\n\nThis seamless approach to developer environment provisioning and cleanup addresses enough of the critical factors in the negative feedback loop that it is effectively nullified.\n\nConsider:\n\n- Developer environment provisioning and cleanup are fully automated, transparent, developer-initiated activities. They do not consume people or human process resources, which are always far slower and more expensive than technology solutions.\n- Provisioning and cleanup timing are exactly synchronized with the developer’s need, preventing inefficiencies in idle time before or after environment usage.\n- They are immutable on a new-branch basis - a new branch always creates a new environment from a fresh copy of the latest code.\n- They are isolated - no sharing contention and no mixing of varying configuration.\n- They treat developer environments as a lifecycle.\n\nThe process is so transparent that some developers may not even realize that their feature branch has an isolated environment associated with it.\n\n## Hard dollar costs are important and opportunity costs are paramount\n\nGitLab environments positively contribute to the value stream in two critical ways. First, the actual waste of idle machines is dramatically reduced. However, more importantly, all the human processes that end up being applied to managing that waste also disappear. Machines running in the cloud are only lost money. Inefficient use of people’s time carries a high dollar cost, but it also carries a higher opportunity cost. 
There are so many value-generating activities people can do when their time is unencumbered by cost-control administration.\n\n## Multiplying the value stream contributions of developer review environments\n\nDeveloper environment friction is an industry-wide challenge and GitLab nearly eliminates the core problems of this feedback cycle. However, GitLab has also gone way beyond simply addressing this problem by creating a lot of additional value through seamless per-feature branch developer environments.\n\nHere is a visualization of where dynamic review environments plug into the overall GitLab developer workflow.\n\n![](https://about.gitlab.com/images/blogimages/environment-friction-lifecycle/gitlabenvironmentlifecycle.png)\n\n**Figure 1: Review environments with AWS Cloud Services**\n\nFigure 1 is showing GitLab’s full development cycle support with a little art of the possible thrown in around interfacing with AWS deployment services. The green dashed arrow indicates that GitLab deploys a review environment when the branch is first created. Since the green arrow is part of the developer's iteration loop, the green arrow is also depicting that review app updates are done on each code push. \n\nThe light purple box is showing that the iterative development and CI checks are all within the context of a merge request (MR), which provides a Single Pane of Glass (SPOG) for all quality checks, vulnerabilities and collaboration. Finally, when the merge is done, the review environment is cleaned up. The feature branch merge request is the furthest left that visibility and remediation can be shifted. GitLab’s shifting of this into the developer feature branch is what gives developers a semi-private opportunity to fix any quality or security findings with the specific code they have added or updated.\n\nOne other thing to note here is that when GitLab CD code is engineered to handle review environments, it is reused for all other preproduction and production environments. The set of AWS icons after the “Release” icon would be using the same deployment code. However, if the GitLab CD code is engineered only around deploying to a set of static environments, it is not automatically capable of review environments. Review environment support is a superset of static environment support.\n\n## Review environments enable a profound shift left of visibility and remediation\n\nAt GitLab “shift left” is not just about “problem visibility” but also about “full developer enablement to resolve problems” while in-context. GitLab merge requests provide critical elements that encourage developers to get into a habit of defect remediation:\n\n- **Context** - Defect and vulnerability reporting is only for code the developer changed in their branch and is tracked by the merge request (MR) for that branch.\n- **Responsibility** - Since MRs and branches are associated to an individual, it is evident to the developer (and the whole team) what defects were introduced or discovered by which developers.\n- **Timing** - Developers become aware of defects nearly as soon as they are introduced, not weeks or months after having integrated with other code. If they were working on a physical product, we can envision that all the parts are still on the assembly bench.\n- **Visibility - Appropriately Local, Then Appropriately Global** - Visibility of defects is context specific. 
While a developer has an open MR that is still a work in progress, they can be left alone to remedy accidentally-introduced defects with little concern from others because the visibility is local to the MR. However, once they seek approvals to merge their code, the approval process for the MR will cause the visibility of any unresolved defects and vulnerabilities to come to the attention of everyone involved in the approval process. This ensures that oversight happens with just the right timing - not too early and not forgotten. This makes a large-scale contribution to human efficiency in the development value stream.\n- **Advisement** - As much as possible, GitLab integrates tools and advice right into the feature branch MR context where the defects are visible. Developers are given full vulnerability details and can take just-in-time training on specific vulnerabilities.\n- **Automated Remediation** - Developers can choose to apply auto-remediations when they are available.\n- **Collaboration** - They can use MR comments and new issues to collaborate with teammates throughout the organization on resolving defects of all types.\n\nHaving seamless, effortless review environments at a per-feature branch granularity is a critical ingredient in GitLab’s ability to maximize the shift left of the above developer capabilities. This is most critical for the developer checks that require a running copy of the application, which is provided by the review environments. These checks include things such as DAST, IAST, API fuzzing and accessibility testing. The industry is also continuing to multiply the types of defect scanners that require an actively running copy of the application.\n\n## Extending GitLab review environments to other cloud application framework PaaS\n\nSo you may be thinking, “I love GitLab review environments, but not all of our applications are targeting Kubernetes.” It is true that the out-of-the-box showcasing of GitLab review environments depends on Kubernetes. One of the key reasons for this is that Kubernetes provides an integrated declarative deployment capability known as deployment manifests. Kubernetes namespaces also provide the critical environment isolation capability. GitLab wires these Kubernetes capabilities up to a few key pieces of GitLab CD to accomplish the magic of isolated, per-feature branch review environments.\n\nAs far as I know, there is no formal or de facto industry term for what I’ll call “Cloud Application Framework PaaS.” Cloud-provided PaaS can be targeted at various “levels” of the problem of building applications. For instance, primitive components such as AWS ELB address the problem of application load balancing by providing a variety of virtual, cloud-scaling and secured appliances that you can use as a component of building an application. Another example is [AWS Cognito](https://aws.amazon.com/cognito/), which helps with providing user login and profile services to an application build.\n\nHowever, there are also cloud PaaS offerings that seek to solve the entire problem of rapid application building and maintenance. These are services like AWS Amplify and AWS App Runner. These services frequently knit together primitive PaaS components (such as those described above) into a composite that attempts to accelerate the entire process of building applications. Frequently these PaaS also include special CLIs or other developer tools that attempt to abstract the creation, maintenance and deployment of an Infrastructure as Code layer. 
They also tend to be [GitOps](/topics/gitops/)-oriented by storing this IaC in the same repository as the application code, which enables full control over deployments via Git controls such as branches and merge requests.\n\nThis approach relieves developers of early stage applications from having to learn IaC or hire IaC operations professionals too early. Basically it allows avoidance of overly early optimization of onboarding IaC skills. If the application is indeed successful it is quite common to outgrow the integrated IaC support provided by these specialized PaaS, however, the evolution is very natural because the managed IaC can simply start to be developed by specialists.\n\nThe distinction of cloud application framework PaaS is important when understanding where GitLab can create compound value with Dynamic Review Environments. I will refer to this kind of PaaS as “Cloud Application Infrastructure PaaS” that tries to solve the entire “Building Applications Problem.”\n\nSo we have a bunch of GitLab interfaces and conventions for implementing seamless developer review environments and we have non-Kubernetes cloud application infrastructures that provide declarative deployment interfaces and we can indeed make them work together! Interesting it is all done in GitLab CI YAML, which means that once you see the art of the possible, you can start implementing dynamic review environment lifecycle management for many custom environment types with the existing GitLab features. \n\n## A working, non-Kubernetes example of dynamic review environments in action\n\n![](https://about.gitlab.com/images/blogimages/environment-friction-lifecycle/CloudFormationDeployAnimatedGif.gif)\n\n**Figure 2: Working CD example of review environments for AWS CloudFormation**\n\nFigure 2 shows the details of an actual non-Kubernetes working example called CloudFormation AutoDeploy With Dynamic Review Environments. This project enables any AWS CloudFormation template to be deployed. It specifically supports an isolated stack deployment whenever a review branch is created and then also destroys that environment when the branch is merged. \n\nHere are some of the key design constraints and best practices that allow it to support automated review environments.:\n\n- **The code is implemented as an include.** Notice that the main [.gitlab-ci.yml](https://gitlab.com/guided-explorations/aws/cloudformation-deploy/-/blob/main/.gitlab-ci.yml) files have only variables applicable to this project and then the inclusion of Deploy-AWSCloudFormation.gitlab-ci.yml. This allows you to treat the CloudFormation integration as a managed process, shared include to be improved and updated. If the stress of backward compatibility of managing a shared dependency is too much, you can encourage developers to make a copy of this file to essentially version peg it with their project.\n\n- **Avoids Conflict with Auto DevOps CI Stage Names** - The [standard stages of Auto Devops are here](https://gitlab.com/gitlab-org/gitlab/-/blob/master/lib/gitlab/ci/templates/Auto-DevOps.gitlab-ci.yml#L70). This constraint allows the auto deploy template to be leveraged. 
\n\n- **Creates and Sequences Custom Stages as Necessary** - For instance, you can see we’ve added a `create-changeset` stage and jobs.\n\n- The `deploy-review` job and its `environment:` section must have a very specific construction. Let’s look at the important details:\n\n  ```\n    rules:\n      - if: '$CI_COMMIT_BRANCH == \"main\"'\n        when: never\n      - if: '$REVIEW_DISABLED'\n        when: never\n      - if: '($CI_COMMIT_TAG || $CI_COMMIT_BRANCH) && $REQUIRE_CHANGESET_APPROVALS == \"true\"'\n        when: manual\n      - if: '($CI_COMMIT_TAG || $CI_COMMIT_BRANCH) && $REQUIRE_CHANGESET_APPROVALS != \"true\"'\n    artifacts:\n      reports:\n        dotenv: envurl.env\n    environment:\n      name: review/$CI_COMMIT_REF_SLUG\n      url: $DYNAMIC_ENVIRONMENT_URL\n      on_stop: stop_review\n  ```\n\n  - `rules:` are used to ensure this job only runs when we are not on the main branch. The main branch implements the long-lived staging and production environments.\n  - `artifacts:reports:dotenv` allows variables populated during a CI job to become pipeline-level variables. The most critical role it plays in this job is to allow the URL retrieved from CloudFormation Outputs to be populated into the variable DYNAMIC_ENVIRONMENT_URL. The file `envurl.env` would have at least the line `DYNAMIC_ENVIRONMENT_URL={url-from-cloudformation}` in it. You can see this in the job code as `echo \"DYNAMIC_ENVIRONMENT_URL=${STACK_ENV_URL}\" >> envurl.env`.\n  - `environment:name:` is using the Auto Deploy convention of placing review apps under the top-level environment folder called `review`. The reference $CI_COMMIT_REF_SLUG ensures that the branch (or tag) name is used, but with all illegal characters removed. By your development convention, the environment name should become a part of the IaC constructs that ensure both uniqueness and identifiability by this pipeline. In GitLab's standard auto deploy for Kubernetes this is done by constructing a namespace that contains the name in this provided parameter. In CloudFormation we make it part of the Stack Name. The value here is exposed in the job as the variable ${ENVIRONMENT}.\n  - `environment:url:` It is not self-evident here that the variable DYNAMIC_ENVIRONMENT_URL was populated by the deployment job and added to the file `envurl.env` so that it would contain the right value at this time. This causes the GitLab “Environment” page to have a clickable link to visit the environment. It is also used by DAST and other live application scan engines to find and scan the isolated environment.\n  - `environment:on_stop:` in the deploy-review job is what maps to the job named `stop_review`. This is the magic sauce behind automatic environment deletion when a feature branch is merged. `stop_review` must be written with the correct commands to accomplish the teardown.\n\n## A reusable engineering pattern\n\nThis CloudFormation pattern serves as a higher-level pattern for how GitLab review environments can be adapted to any other cloud “Application Level PaaS.” This is a term I use to indicate a cloud PaaS that is abstracted highly enough that developers think of it as “a place to deploy applications.” Perhaps a good way to understand it is to contrast it with PaaS that does not claim to serve as an entire application platform. Cloud-based load balancers are a good example of a PaaS that performs a utility function for applications but is not a place to build an entire cloud application. 
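\n\nTo make the teardown half of the lifecycle concrete, here is a minimal sketch of what the `stop_review` job referenced by `environment:on_stop:` could look like for this CloudFormation case. It is illustrative only: the `cleanup` stage name, the stack naming convention, and the AWS CLI commands are assumptions for the sketch rather than the exact code in the example project.\n\n```\nstop_review:\n  stage: cleanup\n  variables:\n    # Do not clone the repo - the branch may already be gone when this job runs.\n    GIT_STRATEGY: none\n  rules:\n    - if: '$CI_COMMIT_BRANCH == \"main\"'\n      when: never\n    - if: '$REVIEW_DISABLED'\n      when: never\n    - if: '$CI_COMMIT_TAG || $CI_COMMIT_BRANCH'\n      when: manual\n      allow_failure: true\n  script:\n    # Hypothetical stack name - the real project derives it from the environment name.\n    - aws cloudformation delete-stack --stack-name \"review-${CI_COMMIT_REF_SLUG}\"\n    - aws cloudformation wait stack-delete-complete --stack-name \"review-${CI_COMMIT_REF_SLUG}\"\n  environment:\n    name: review/$CI_COMMIT_REF_SLUG\n    action: stop\n```\n\nBecause the job declares `action: stop` for the same environment name, GitLab triggers it when the feature branch is merged or deleted, which is what tears the review stack down without any human involvement.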
\n\n## Application PaaS for abstracting IaC concerns for developers\n\nGitLab auto deploy combines well with cloud application framework PaaS that have a disposition toward developer productivity, reducing or eliminating the IaC management required of developers. AWS Amplify has such productivity support in the form of a developer-specific CLI, which allows IaC to be authored and updated in the same Git repository where the application code is stored. Adding an entire scaling database PaaS is as simple as running a single CLI command.\n\nGenerally such Application PaaS not only generate and help maintain IaC through highly abstracted CLI or UI actions, they also contain a single `deploy` command which is easily combined with a GitLab Auto Deploy template for working with that particular Application PaaS.\n\n## Wrap up\n\nHopefully this article has helped you understand that:\n\n- GitLab already contains a super valuable feature that automates developer environment lifecycle management.\n- It is critical in addressing a key friction in the DevOps value chain.\n- It can be extended beyond Kubernetes to other cloud application framework PaaS offerings.\n\n\nPhoto by [Sandeep Singh](https://unsplash.com/@funjabi?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/s/photos/friction?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n",[710,754,9],{"slug":896,"featured":6,"template":690},"environment-friction-cycle","content:en-us:blog:environment-friction-cycle.yml","Environment Friction Cycle","en-us/blog/environment-friction-cycle.yml","en-us/blog/environment-friction-cycle",{"_path":902,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":903,"content":909,"config":917,"_id":919,"_type":14,"title":920,"_source":16,"_file":921,"_stem":922,"_extension":19},"/en-us/blog/exporting-vulnerability-reports-to-html-pdf-jira",{"title":904,"description":905,"ogTitle":904,"ogDescription":905,"noIndex":6,"ogImage":906,"ogUrl":907,"ogSiteName":673,"ogType":674,"canonicalUrls":907,"schema":908},"How to export vulnerability reports to HTML/PDF and Jira","With GitLab's API, it's easy to query vulnerability info and send the report details elsewhere, such as a PDF file or a Jira project.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749662877/Blog/Hero%20Images/security-cover-new.png","https://about.gitlab.com/blog/exporting-vulnerability-reports-to-html-pdf-jira","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to export vulnerability reports to HTML/PDF and Jira\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Siddharth Mathur\"}],\n        \"datePublished\": \"2023-09-14\",\n      }",{"title":904,"description":905,"authors":910,"heroImage":906,"date":912,"body":913,"category":708,"tags":914},[911],"Siddharth Mathur","2023-09-14","\nGitLab's [Vulnerability Report](https://docs.gitlab.com/ee/user/application_security/vulnerability_report/) makes it easy to triage security scan results without ever having to leave the platform. You can manage your code, run security scans against it, and fix vulnerabilities all in one place. That being said, some teams prefer to manage their vulnerabilities in a separate tool like Jira. 
They may also need to present the vulnerability report to leadership in a digestible format.\n\nOut of the box, GitLab's Vulnerability Report can be [exported to CSV](https://docs.gitlab.com/ee/user/application_security/vulnerability_report/#export-vulnerability-details) with a single click, for easy analysis in other tools. In some cases though, a simple PDF of the report is all that's needed. \n\nWith [GitLab's API](https://docs.gitlab.com/ee/api/graphql/reference/index.html#queryvulnerabilities), it's easy to query vulnerability info and send the report details elsewhere, such as a PDF file or a Jira project. In this blog, we'll show you how to export to HTML/PDF and Jira. **Note that the scripts used in this tutorial are provided for educational purposes and they are not supported by GitLab.**\n\n## Exporting to HTML/PDF\nTo export your vulnerability reports to HTML or PDF, head to the [Custom Vulnerability Reporting](https://gitlab.com/jwagner-demo/vandelay-industries/engineering/custom-vulnerability-reporting) project. \n\n![Project overview](https://about.gitlab.com/images/blogimages/2023-07-27-exporting-vulnerability-reports-to-html-pdf-and-jira/project_overview.png)\n\n\nThis project contains a script that queries a project's vulnerability report, and then generates an HTML file from that data. The pipeline configured in the project runs this script and converts the HTML file to PDF as well.\n\nTo use the exporter, first [fork the project](https://gitlab.com/jwagner-demo/vandelay-industries/engineering/custom-vulnerability-reporting/-/forks/new) or [import it into a new project](https://gitlab.com/projects/new#import_project) (select “Repository by URL” and paste the git URL of the original project).\n\n![Project import](https://about.gitlab.com/images/blogimages/2023-07-27-exporting-vulnerability-reports-to-html-pdf-and-jira/project_import.png)\n\n\nSet the CI/CD variables as described in the readme. You'll need the following from GitLab:\n- GitLab project/personal access token with permissions to access vulnerability info (read_api scope)\n- GitLab GraphQL API URL (for SaaS this is https://gitlab.com/api/graphql)\n- GitLab project path (e.g. smathur/custom-vulnerability-reporting)\n\nAfter you've set the required CI/CD variables, manually run a pipeline from your project's Pipelines page. Once the pipeline is complete, you'll see your file export by going to the “build_report” (for HTML) or “pdf_conversion” job and selecting “Download” or “Browse” on the sidebar under \"Job artifacts.\" And there you have it! A shareable, easy-to-read export of your project's vulnerabilities.\n\n![PDF export](https://about.gitlab.com/images/blogimages/2023-07-27-exporting-vulnerability-reports-to-html-pdf-and-jira/pdf_export.png)\n\n\n## Exporting vulnerability info to Jira\nGitLab lets you create Jira tickets from vulnerabilities through the UI using our [Jira integration](https://docs.gitlab.com/ee/user/application_security/vulnerabilities/#create-a-jira-issue-for-a-vulnerability). While you can do this individually for vulnerabilities that need actioning, sometimes teams need to bulk-create Jira tickets for all their vulnerabilities. We can leverage GitLab and Jira's APIs to achieve this.\n\nTo get started, head to the [External Vulnerability Tracking](https://gitlab.com/smathur/external-vulnerability-tracking) project. This script fetches vulnerabilities in the same way as the script above, but it uses the Jira API to create a ticket for each vulnerability. 
Each ticket's description is also populated with details from GitLab's vulnerability report.\n\nTo use the exporter, simply [fork the project](https://gitlab.com/smathur/external-vulnerability-tracking/-/forks/new) or [import it into a new project](https://gitlab.com/projects/new#import_project) (select “Repository by URL” and paste the git URL of the original project), and set the CI/CD variables as described in the readme. You'll need the following from GitLab:\n- GitLab project/personal access token with permissions to access vulnerability info (read_api scope)\n- GitLab GraphQL API URL (for SaaS this is https://gitlab.com/api/graphql)\n- GitLab project path (e.g. smathur/external-vulnerability-tracking)\n\nYou will also need the following from Jira:\n- Jira [personal access token](https://id.atlassian.com/manage-profile/security/api-tokens)\n- Jira API issue endpoint URL (for SaaS this is https://ORG_NAME.atlassian.net/rest/api/latest/issue/)\n- Jira user email ID\n- Jira project key where you want to create vulnerability tickets (e.g. ABC)\n\nOnce you have set your CI/CD variables as described in the project readme, simply run a pipeline from your project's Pipelines page, and watch as your tickets get created in Jira!\n\nIf you run the pipeline again in the future, the script will run a search query against your Jira project to prevent duplicate tickets from being created. It will create tickets for new vulnerabilities that aren't already in Jira.\n\n![Jira export](https://about.gitlab.com/images/blogimages/2023-07-27-exporting-vulnerability-reports-to-html-pdf-and-jira/jira_export.png)\n\n\n## References\n- [GitLab Vulnerability API](https://docs.gitlab.com/ee/api/graphql/reference/index.html#queryvulnerabilities)\n- [Custom Vulnerability Reporting project](https://gitlab.com/jwagner-demo/vandelay-industries/engineering/custom-vulnerability-reporting)\n- [External Vulnerability Tracking project](https://gitlab.com/smathur/external-vulnerability-tracking)\n- [Jira REST API examples](https://developer.atlassian.com/server/jira/platform/jira-rest-api-examples/)\n\n",[687,915,916,9],"collaboration","security",{"slug":918,"featured":6,"template":690},"exporting-vulnerability-reports-to-html-pdf-jira","content:en-us:blog:exporting-vulnerability-reports-to-html-pdf-jira.yml","Exporting Vulnerability Reports To Html Pdf Jira","en-us/blog/exporting-vulnerability-reports-to-html-pdf-jira.yml","en-us/blog/exporting-vulnerability-reports-to-html-pdf-jira",{"_path":924,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":925,"content":931,"config":940,"_id":942,"_type":14,"title":943,"_source":16,"_file":944,"_stem":945,"_extension":19},"/en-us/blog/five-signs-you-should-think-bigger",{"title":926,"description":927,"ogTitle":926,"ogDescription":927,"noIndex":6,"ogImage":928,"ogUrl":929,"ogSiteName":673,"ogType":674,"canonicalUrls":929,"schema":930},"Five signs you should think BIGGER!","Are you a designer who is frustrated with only focusing on the next milestone? Do you feel like you have to answer too many questions in every Issue? Do you feel like your product is not making any progress? 
**Time to Think Bigger!**","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099620/Blog/Hero%20Images/Blog/Hero%20Images/insights_insights.png_1750099620265.png","https://about.gitlab.com/blog/five-signs-you-should-think-bigger","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Five signs you should think BIGGER!\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Iain Camacho\"}],\n        \"datePublished\": \"2021-03-30\",\n      }",{"title":926,"description":927,"authors":932,"heroImage":928,"date":934,"body":935,"category":708,"tags":936},[933],"Iain Camacho","2021-03-30","\n\n{::options parse_block_html=\"true\" /}\n\n\n\nAs a designer, it’s difficult to balance the scale of initiatives: Design too small, and nobody is excited or can understand the direction things are going. Start too big and everyone on the team may be too intimidated to start. ThinkBIG is a way of utilizing designers’ natural skillset to balance the iterative nature of engineering with the visionary nature of design.\n\nHere are 5 signals that you should switch up your style and Think Bigger:\n\n### 1) Every milestone is spent only prepping the next\n\n#### Signal\n\nWe’ve all been there. The next milestone planning issue is starting to get filled out and you, the designer, are realizing how many issues need design in order to be ready. As the priorities shift, you know the last two weeks of this milestone will be spent desperately trying to design mockups for engineers to start working on days later. I like to call this “Feeding the sharks”. It describes a certain level of panic some designers feel every milestone: If I don’t deliver enough, I might get chomped!\n\n#### Solution\n\nThinkBIG focuses on creating a larger-scale vision that can be iterated on as we go. This means that each design you put together leads to many independent issues engineers can work on. For a designer, this increases [results](https://handbook.gitlab.com/handbook/values/#results) by delivering one design worth many issues.\n\n### 2) Engineers are asking _a lot_ of questions\n\n#### Signal\n\nHave you ever started a new milestone and as engineers get started, they have a million questions detailing every possible state, permutation, and example that they should account for? This line of questioning means you, the designer, now need to make a myriad of new designs with only minute changes between them. This is not an [efficient](https://handbook.gitlab.com/handbook/values/#efficiency) use of the designer’s time.\n\n#### Solution\n\nFirst off, all these questions are valid and decisions that need to be made. By Thinking Bigger, engineers are better prepared to handle all the edge cases independently because they walk into their work with a fuller context of the impact on users.  This enables empathy-driven engineering, allowing engineers to lead the conversation around edge-cases with solutions in mind, instead of needing it to be defined ahead of time. By pushing the edge cases further down the product development lifecycle, there is also a unique opportunity for product, design, and engineering to [collaborate](https://handbook.gitlab.com/handbook/values/#collaboration) on delivering value to customers while still working iteratively.\n\n### 3) Nobody agrees on what the “MVC” actually is\n\n#### Signal\n\nPicture it: You’ve worked hard for weeks refining and distilling a big feature ask into a nicely designed MVC. 
It’s small, delivers value, and is beautiful to boot! You’ve convinced your PM to prioritize this beautiful little gem and it’s going onto the planning board. Everything feels amazing until… devastation!\n\nAfter engineering looked at it, they came back and said it was too large and would need to be broken down further. Now you’re at the end of your milestone and you’re swiftly picking away at your beautiful design into a shallow imitation of its former glory.\n\n#### Solution\n\nHowever, there is a simple way to keep this from happening: “[Iteration](https://handbook.gitlab.com/handbook/values/#iteration) is a team sport”. The designer shouldn’t be the only person on the team compromising for the sake of MVC. With ThinkBIG, you have multiple chances to bring engineering into the fold early and with the full vision in mind. This means devs are part of the conversation from the start, able to craft a valuable iteration and your designs become the conversation piece of deciding “What can we do next to deliver an amazing experience to our customers?”\n\n### 4) We’re working so hard but not getting anywhere\n\n#### Signal\n\nWorking iteratively is incredibly powerful and at GitLab, we can see the value of an iterative approach. We’re able to change our priorities at a moment’s notice and the work we actually have to deliver is reasonable and manageable while continuously delivering new value to customers. There is, however, a small drawback: When you’re only focusing on the step immediately in front of you, it’s easy to get lost along the way.\n\n#### Solution\nAs a designer, we have a unique opportunity to be the navigator for our teams. Using the ThinkBIG model, designers are empowered to hold responsibility for the Vision. From here, the Product Manager/Product Designer relationship becomes a balance between the vision and the strategy. Designs based on the large vision are used to keep the team on track for hitting the targets that bring value to customers while allowing for collaboration with the rest of the team on what tiny steps we take to get there.\n\n### 5) Engineers are reworking a lot\n\n#### Signal\n\nMy engineer and I are excited to work on a new effort. I’ve designed the first iteration and successfully passed it to them.  While they’re building, I’m working on the design for the next iteration. A few weeks later the new changes are merged, the next iteration designs are ready, and customers are already seeing value. Your engineer looks at the next iteration and painfully mutters “Well, I’ll have to rewrite what I wrote the last milestone to account for this.”\n\n#### Solution\n\nIn a highly iterative development lifecycle, it’s not uncommon to have to rework things as the product evolves. However, it shouldn’t be happening every time. With ThinkBIG, engineers are informed of the long-term goal as well as the short-term MVC iteration. This extra context allows them to deliver the iteration while architecting their code in an informed way of where it will go.\n\n### Start Thinking BIGGER!\n\nAre some of these signals sounding familiar? Then switching your design style to ThinkBIG may be for you! The simplest way to make this change is to move iteration breakdown to **after** the design phase. It immediately shows engineers where we want to go as a product or feature, opens the implementation breakdown (MVC) conversation to the whole team, and provides incredibly valuable insight to everyone on the team. 
This model of working helps designers be more efficient, deliver results, and foster a tight collaboration with the broader team. To see this process in action, check out a [Package ThinkBIG around the dependency proxy design and research](https://www.youtube.com/watch?v=LXFu6oDxhsw). For more information, check out the GitLab Handbook on [ThinkBIG](https://about.gitlab.com/handbook/product/ux/thinkbig/) to learn more.\n",[915,937,938,939,9],"design","inside GitLab","remote work",{"slug":941,"featured":6,"template":690},"five-signs-you-should-think-bigger","content:en-us:blog:five-signs-you-should-think-bigger.yml","Five Signs You Should Think Bigger","en-us/blog/five-signs-you-should-think-bigger.yml","en-us/blog/five-signs-you-should-think-bigger",{"_path":947,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":948,"content":954,"config":962,"_id":964,"_type":14,"title":965,"_source":16,"_file":966,"_stem":967,"_extension":19},"/en-us/blog/gitlab-apis-ci",{"title":949,"description":950,"ogTitle":949,"ogDescription":950,"noIndex":6,"ogImage":951,"ogUrl":952,"ogSiteName":673,"ogType":674,"canonicalUrls":952,"schema":953},"Using Gitlab APIs: Real Use Case Scenario","Learn about how GitLab CI and APIs can help you automate bulk tasks","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681037/Blog/Hero%20Images/gitlabapi-cover.jpg","https://about.gitlab.com/blog/gitlab-apis-ci","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Using Gitlab APIs: Real Use Case Scenario\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"William Arias\"}],\n        \"datePublished\": \"2020-01-22\",\n      }",{"title":949,"description":950,"authors":955,"heroImage":951,"date":957,"body":958,"category":959,"tags":960},[956],"William Arias","2020-01-22","\n\n{::options parse_block_html=\"true\" /}\n\n\n\nGitlab APIs along with  Continuous Integration can be very helpful when executing certain bulk tasks.\n\nConsider this requirement derived from a real-world scenario\n\n* Company XYZ possess several repositories that have been organized under a Gitlab group\n\n![group](https://about.gitlab.com/images/blogimages/gitlab-apis-ci/gitlab-group.png){: .shadow.medium.center.wrap-text}\n\n* The company needs to test the building of projects in bulk using new  hardware (Runner with different CPU Architecture) that will bring down  execution costs, whenever the build in each of the projects fails an issue must be  automatically created.\n\n![runner](https://about.gitlab.com/images/blogimages/gitlab-apis-ci/runner.png){: .shadow.medium.center.wrap-text}\n\n* Lastly, all the issues that were automatically created whenever a project built failed,  should be collected in bulk and reported back to a Wiki\n\n![pipelineview](https://about.gitlab.com/images/blogimages/gitlab-apis-ci/3-pipelineview-collect-issues.png){: .shadow.medium.center.wrap-text}\n\nHow do we test the building of those several projects and create issues and reports about its execution automatically? Let's use Gitlab CI and  APIs.\n\n\n## 1. Company groups and projects Structure\n\nIn this case, the set of projects were grouped under a single group, following this structure:\n\n![groupview](https://about.gitlab.com/images/blogimages/gitlab-apis-ci/4-group-view-api-blog.png){: .shadow.medium.center.wrap-text}\n\n## 2. 
Automatically creating Issues leveraging GitLab CI and API\n\nTo create issues using the GitLab API, we will use the Issues API. An example call looks like the following cURL command:\n\n![curl](https://about.gitlab.com/images/blogimages/gitlab-apis-ci/5-create-issue-api-gitlabapi.png){: .shadow.medium.center.wrap-text}\n\nThe API call:\n\n `curl --request POST --header \"PRIVATE-TOKEN:$ISSUE_API_KEY\" \"https://gitlab.com/api/v4/projects/$CI_PROJECT_ID/issues?title=Build%20Failed&labels=ARMbuild&description=Project%20Tests%20Failed%20on%20ARM\"`\n\n The previous GitLab API call can be configured to run whenever a job fails. Let's dissect this API call to understand its parameters so you can potentially customize it for your project environment:\n\n* Base URL: https://gitlab.com/api/v4/projects\n* Project where we want to add the issue: $CI_PROJECT_ID. Notice this ID is unique and corresponds to the project where the CI/CD pipeline runs.\n* Issues: Endpoint we use to tell GitLab we want to add an issue to the project\n* Parameters:\n  * Title: How we want the issue to be titled\n  * Labels: Helpful to group issues by label or type. They help you organize and tag your work so you can track and find the work items you’re interested in.\n  * Description: Field to explain the nature of the issue if needed\n\n The request is of type POST because we are sending data to the receiving service. For this call to be successful, it requires authentication, for which we will use the *PRIVATE-TOKEN* header.\n\n The private token can be generated by following these steps: [How-to-generate-token](https://docs.gitlab.com/ee/user/profile/personal_access_tokens.html)\n\nWhen we execute the above API call, we create an issue in the corresponding GitLab project.\n\n![issueproject](https://about.gitlab.com/images/blogimages/gitlab-apis-ci/5-issues-created.png){: .shadow.medium.center.wrap-text}\n\nGreat, so once the multi-project pipeline has run, each of the projects that failed in its build stage will create an issue warning us to double-check why it failed, while documenting the failure and labeling it for future follow-up.\n\n![multiproject](https://about.gitlab.com/images/blogimages/gitlab-apis-ci/7.1-multiproject-pipeline-gitlabapi.png){: .shadow.medium.center.wrap-text}\n\n## 3. Automatically collecting all the issues from the GitLab group\n\nThanks to GitLab CI and APIs, we can collect all the issues created and report them back by adding this script to your pipeline stage:\n\n![collectissues](https://about.gitlab.com/images/blogimages/gitlab-apis-ci/7-collecting-issues-apiblog.png){: .shadow.medium.center.wrap-text}\n\nLet's dissect the main API call again:\n\n`curl --header \"PRIVATE-TOKEN:$GROUP_ISSUE_LIST\" \"https://gitlab.com/api/v4/groups/9123625/issues\"`\n\n* Base URL: https://gitlab.com/api/v4/\n* Group resource: /groups/9123625\n* Issues resource: /issues\n\nThe previous API call will return a JSON object, which we will save as an artifact when executing our pipeline job. Notice this artifact is created and saved automatically by GitLab CI.\n\nGreat! So far we have created issues per failed project and collected them all in one single step.\n\n\n## 4. 
Reporting back to the Wiki project\n\n![wikijob](https://about.gitlab.com/images/blogimages/gitlab-apis-ci/8-reportwiki-gitlab-api.png){: .shadow.medium.center.wrap-text}\n\nFor convenience, the JSON report was transformed to Markdown. Then, using the following script, we publish the Markdown report to the wiki of a specific project:\n\n`curl --data \"format=markdown&title=$CI_JOB_ID&content=$results\" --header \"PRIVATE-TOKEN:$API_WIKI\" \"https://gitlab.com/api/v4/projects/20852684/wikis\"`\n\nLet's break down the API call again:\n\n* Base URL: https://gitlab.com/api/v4/\n* Project resource ID: /projects/20852684\n* Wikis resource: /wikis\n* Parameters:\n  * Data format: markdown. We want to publish a Markdown table\n  * Title: Title of the wiki entry; we use the $CI_JOB_ID environment variable of the job that was executed\n  * Content: The Markdown table generated from the issues collection\n\n Finally, when the last API call has been executed, this is an example of the output we can get:\n\n ![report](https://about.gitlab.com/images/blogimages/gitlab-apis-ci/10-test-report-gitlabapi.png){: .shadow.medium.center.wrap-text}\n\nLet's recapitulate: By using GitLab CI in a multi-project pipeline along with APIs, we were able to automatically test any number of projects and report on their compatibility with a new hardware CPU architecture. More information about the APIs utilized for this project is available here:\n\n[Issues-api](https://docs.gitlab.com/ee/api/issues.html#new-issue)\n[Collect-group-issues](https://docs.gitlab.com/ee/api/issues.html#list-group-issues)\n[WikisAPI](https://docs.gitlab.com/ee/api/wikis.html)\n\n[Multi-project-pipeline](https://about.gitlab.com/blog/cross-project-pipeline/)\n\n\nIf you’d like to see GitLab’s API in action, watch this [video](https://youtu.be/zdBwMHARkU0?t=469).\n\nFor more information, visit [LEARN@GITLAB](https://about.gitlab.com/learn/).\n\nCover image credit:\n\nCover image by [Mohanan](https://unsplash.com/photos/yQpAaMsQzYE) on [Unsplash](https://unsplash.com)\n{: .note}\n\n","unfiltered",[732,9,710,961],"demo",{"slug":963,"featured":6,"template":690},"gitlab-apis-ci","content:en-us:blog:gitlab-apis-ci.yml","Gitlab Apis Ci","en-us/blog/gitlab-apis-ci.yml","en-us/blog/gitlab-apis-ci",{"_path":969,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":970,"content":976,"config":981,"_id":983,"_type":14,"title":984,"_source":16,"_file":985,"_stem":986,"_extension":19},"/en-us/blog/gitlab-at-aws-re-invent-2023",{"title":971,"description":972,"ogTitle":971,"ogDescription":972,"noIndex":6,"ogImage":973,"ogUrl":974,"ogSiteName":673,"ogType":674,"canonicalUrls":974,"schema":975},"GitLab at AWS re:Invent 2023","GitLab and AWS have streamlined development and security for DevSecOps teams. 
Learn how in lightning talks, sessions, live demos, and more.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749664472/Blog/Hero%20Images/gitlabflatlogomap.png","https://about.gitlab.com/blog/gitlab-at-aws-re-invent-2023","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab at AWS re:Invent 2023\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Darwin Sanoy\"}],\n        \"datePublished\": \"2023-11-22\",\n      }",{"title":971,"description":972,"authors":977,"heroImage":973,"date":978,"body":979,"category":773,"tags":980},[705],"2023-11-22","GitLab will be at AWS re:Invent 2023 in Las Vegas, November 27 to December 1, to demonstrate how the GitLab DevSecOps Platform on Amazon Web Services delivers secure, enterprise-grade AI throughout the software development lifecycle. Stop by Booth #1152 in the Security Zone for [lightning talks, live demos, customer sessions, and more](https://about.gitlab.com/events/aws-reinvent/) all week. \n\nMake sure to [check out our event page and calendar](https://about.gitlab.com/events/aws-reinvent/) to find sessions, locations, opportunities to meet with GitLab, and more (note, they do not appear in the AWS event app). Some sessions will also be available on-demand after the conference.\n\nHere are some of the lightning talks GitLab will be presenting:\n\n**Frictionless developer experience: Using human habits to accelerate DevSecOps maturity and increase joy**\n\nGitLab’s long-standing approach to building DevSecOps pipelines aligns with AWS’ new emphasis on frictionless developer experiences. Join this session to learn how the GitLab DevSecOps platform represents a true “shift left” by empowering and streamlining developers’ normal workflow.\n\n[Add to calendar - Nov. 30](https://content.gitlab.com/viewer/65412018ca9e0b9d4b50acb2?iid=654966e4f2269af78f005ba1)\n\n**New integrations and solutions for using GitLab and AWS together**\n\nIn recent months, AWS and GitLab have built new service integrations for source control, CI, and CD. You'll learn how GitLab integrates with AWS CodeStar Connections, Amazon CodeGuru, OpenID, and more, as well as development and deployment solutions for Serverless.com Framework and Terraform to AWS.\n\nAdd to calendar\n* [Nov. 28](https://content.gitlab.com/viewer/65412018ca9e0b9d4b50acb2?iid=654144eef011a50313dc7113)\n* [Nov. 29](https://content.gitlab.com/viewer/65412018ca9e0b9d4b50acb2?iid=654942dfef8fa23b213f0eca)\n* [Nov. 30](https://content.gitlab.com/viewer/65412018ca9e0b9d4b50acb2?iid=65494b66a0b8daf9ca33a386)\n\n**Secure and assured Terraform development using GitLab security scanning policies and managed DevOps environments**\n\nThis lightning talk discusses and demonstrates working example code that extends GitLab's existing support for Terraform State management with full lifecycle-managed DevOps environments for merge requests, long-lived pre-production environments, production environments, and one-off experimental environments. Whether you are developing infrastructure as code specifically or embedding it with application code for the sake of easy environment support, this lightning talk has something to offer you.\n\n[Add to calendar - Nov. 
28](https://content.gitlab.com/viewer/65412018ca9e0b9d4b50acb2?iid=654961043165b6f013635639)\n\n**Secure GitLab CD pipelines to AWS with OpenID Federation, OIDC, and JWT**\n\nGitLab has three ways to authenticate and authorize your CI and CD workloads into AWS environments. Adding and refining OpenID provides the ability to use an industry standard, which is the most advanced of the three. Join us to learn how to accomplish this highly secure integration option.\n\n[Add to calendar - Nov. 29](https://content.gitlab.com/viewer/65412018ca9e0b9d4b50acb2?iid=6549580763edc0caa46ea061)\n\n**Security intelligence through full integration of Amazon CodeGuru Security into GitLab**\n\nAWS CodeGuru Security has created a full integration that enables you to view scanner results in GitLab merge requests and security dashboards so you can use them to block merges in security policy merge approval rules — just like GitLab’s integrated security scanning results. Attend this lightning talk to learn more.\n\n[Add to calendar - Nov. 28](https://content.gitlab.com/viewer/65412018ca9e0b9d4b50acb2?iid=654953f963edc0cdbf6e8c6f)\n\n## GitLab and AWS: The year in review\nThroughout 2023, GitLab and AWS announced partner designations and new service integrations that enable development, security, and operations teams to collaborate more easily, to take advantage of AI at all stages, and to flexibly scale infrastructure to create and deploy secure software faster. \n\n#### AWS recognized GitLab as a partner in several categories\n\n- **AWS DevSecOps Partner Competency Specialty:** This specialty denotes that GitLab makes it easy for customers to [integrate security across every stage](https://about.gitlab.com/blog/aws-devsecops-competency-partner/) of the development and delivery cycles, providing rapid and contextual feedback to development, security, and ops teams.\n\n-  **Amazon Linux 2023 Ready Partner:** Amazon Linux 2023-specific RPM packages are available for GitLab, starting at [Version 16.3.0](https://docs.gitlab.com/ee/administration/package_information/supported_os.html) and for GitLab Runner. Official GitLab support for Amazon Linux 2023 also means GitLab builds the RPM packages and hosts them on our packages infrastructure, Graviton (arm64) and amd64 architectures are both supported. To install GitLab on Amazon Linux 2023, [follow these instructions](https://about.gitlab.com/install/#amazonlinux-2023). \n\nLearn more about [GitLab's AWS partner designations](https://docs.gitlab.com/ee/solutions/cloud/aws/gitlab_aws_partner_designations.html).\n\n#### AWS CodeStar Connections opens up a host of AWS service integrations\n\nAWS recently completed the integration of GitLab.com SaaS into its AWS CodeStar Connections service. This service is a foundational, shared service used by many other AWS services to connect to Git repositories outside of AWS. As a result, GitLab was immediately available to AWS services once this integration was completed.\n\nGitLab is available at CodeStar Connections throughout many AWS services for connectivity to Git. 
In addition, using a CodeStar Connection for an AWS CodePipeline opens up other service integrations that primarily rely on CodePipeline as their key integration point.\n\nHere is a visual map of the integrations that are currently available:\n\n![CodeStar Connections integrations](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749676883/Blog/Content%20Images/gitlabcodestarconnectionsintegration.png)\n\n#### AI customization with AWS CodeWhisperer\n[AWS CodeWhisperer's customization capability](https://aws.amazon.com/blogs/aws/new-customization-capability-in-amazon-codewhisperer-generates-even-better-suggestions-preview/) leverages CodeSuite Connections, allowing generative code suggestions to take into account the libraries and design patterns of your current application when suggesting new code. It does so with no ingestion of your code into the general LMM creation. AWS CodeWhisperer can be pointed to a GitLab repository. \n\n#### AWS CodeGuru and GitLab Ultimate secure scanning integration\nThe AWS CodeGuru team [built an integration with GitLab CI](https://docs.gitlab.com/ee/solutions/cloud/aws/gitlab_aws_integration.html#scm-integrations) as part of their build secure scanning capabilities. [Amazon CodeGuru Security findings](https://docs.aws.amazon.com/codeguru/latest/security-ug/get-started-gitlab.html) use GitLab’s vulnerability report formatting, enabling exports to integrate directly into GitLab Ultimate security features such as merge request views, security dashboards, and in-context remediation solutions and training. Importantly, it allows these findings to be addressed by GitLab Security Policy Merge Approval Rules. \n\n#### GitLab's new single-tenant Saas option sits atop AWS\nEarlier this year, GitLab launched [GitLab Dedicated](https://docs.gitlab.com/ee/subscriptions/gitlab_dedicated/), a single-tenancy solution for organizations in highly regulated industries that have complex regulatory, compliance, and data residency requirements. The fully isolated SaaS offering is hosted and managed by GitLab and deployed on AWS in a cloud region of the customer's choosing. [Learn more about how GitLab built GitLab Dedicated](https://about.gitlab.com/blog/building-gitlab-with-gitlabcom-how-gitlab-inspired-dedicated/).\n\n## Plan your week at AWS re:Invent\nFill your calendar with GitLab at AWS re:Invent! [Check out our calendar](https://about.gitlab.com/events/aws-reinvent/) of sponsored sessions, lightning talks, live demos, and more throughout the week at Booth #1152.\n",[9,797,482,282],{"slug":982,"featured":91,"template":690},"gitlab-at-aws-re-invent-2023","content:en-us:blog:gitlab-at-aws-re-invent-2023.yml","Gitlab At Aws Re Invent 2023","en-us/blog/gitlab-at-aws-re-invent-2023.yml","en-us/blog/gitlab-at-aws-re-invent-2023",{"_path":988,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":989,"content":995,"config":1002,"_id":1004,"_type":14,"title":1005,"_source":16,"_file":1006,"_stem":1007,"_extension":19},"/en-us/blog/gitlab-cnh-for-50k-users",{"title":990,"description":991,"ogTitle":990,"ogDescription":991,"noIndex":6,"ogImage":992,"ogUrl":993,"ogSiteName":673,"ogType":674,"canonicalUrls":993,"schema":994},"Ready-To-Run GitLab for 50,000 users with AWS Quick Start","If you have two hours, you can deploy a GitLab instance on EKS for any number of users. All it takes is about 14 clicks! 
Here's what you need to know.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680619/Blog/Hero%20Images/construction-blueprint.jpg","https://about.gitlab.com/blog/gitlab-cnh-for-50k-users","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to provision Ready-To-Run GitLab for 50,000 users with the AWS Quick Start\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Darwin Sanoy\"}],\n        \"datePublished\": \"2021-10-06\",\n      }",{"title":996,"description":991,"authors":997,"heroImage":992,"date":998,"body":999,"category":708,"tags":1000},"How to provision Ready-To-Run GitLab for 50,000 users with the AWS Quick Start",[705],"2021-10-06","\n\nIf you have spent time reviewing GitLab Reference Architectures, you may have noticed the flexibility of the GitLab codebase; it's possible to support a broad range of implementations from a single box for under one hundred users to horizontal hyper-scaled setups for 50,000 or more.\n\nScaling to massive sizes requires the services within GitLab to be broken out into dedicated compute and storage layers so they can each expand cost-effectively based on high load and an organization's specific usage patterns.\n\nThose who provision large-scale systems on the cloud generally turn to [Infrastructure as Code (IaC)](/direction/delivery/infrastructure_as_code/) to ensure consistency and to allow easy setup of pre-production environments for the target system. Until recently, GitLab implementers have had to craft this code from scratch.\n\nNow, thanks to our investments in IaC tooling, GitLab customers have an entire implementation ecosystem to work from. These efforts include the [GitLab Environment Toolkit (GET)](/blog/why-we-are-building-the-gitlab-environment-toolkit-to-help-deploy-gitlab-at-scale/) and the AWS Quick Start for cloud native hybrid on EKS.\n\nThis post will focus on the AWS Quick Start - but it's worth noting both initiatives are open source - so you can consume, customize, and contribute!\n\n## What is an AWS Quick Start?\n\nAWS Quick Starts are much more than the \"getting started\" feeling implied by their name. As a part of the Quick Start program, AWS ensures that each one reflects the best practices of the software vendor (GitLab in this case) as well as AWS' own well-architected standards. They reflect a high level of technical partnership and technical assurance by both companies. The Quick Start program also includes a hard requirement for high availability of every component of the deployed application. Even bastion hosts are run in an autoscaling group so they will respawn if they unexpectedly terminate. Quick Starts are also intended to create a \"Ready-to-Run\" implementation whenever possible. Quick Starts are open source and have a dependency model which allows GitLab to reuse the existing EKS Quick Start as a foundation.\n\n## What is the GitLab AWS implementation pattern for cloud native hybrid on EKS?\n\nGitLab has Reference Architectures that determine how to install GitLab for various user counts. Each Reference Architecture has a section on cloud native hybrid to show how to configure it and the advised number of vCPUs and memory for the target user count. Each one is similar to blueprints for a building. 
\n\nThe AWS implementation pattern for cloud native hybrid on EKS builds on this information by:\n\n- Showing how to maximize the usage of AWS PaaS with assurance of GitLab Reference Architecture compliance.\n- Showing a tally of total cluster resources as specified by the Reference Architecture.\n- Presenting a bill of materials listing:\n\n  - EKS node instance type (sizing) and count as tested.\n  - RDS PostgreSQL and Redis ElastiCache instance types (sizing) and count as tested.\n  - Gitaly Cluster instance types (sizing) and count as tested.\n  \n- [GPT testing](https://gitlab.com/gitlab-org/quality/performance) results for a system configured according to the bill of materials. This can be used to compare back to the reference architectures and to your own configuration that is based on the bill of materials.\n\nSo while the Reference Architectures are like building blueprints, the AWS implementation pattern for cloud native hybrid on EKS intends to be like a bill of materials (shopping list) you can plug directly into the parameters of the AWS Quick Start or the GitLab Environment Toolkit to build GitLab on EKS with a pre-tested configuration.\n\n## \"Deploy Now\" links\n\nWithin each AWS implementation pattern for cloud native hybrid on EKS you will find some \"Deploy Now\" links. These make the AWS Quick Start even easier to use by presetting all the instance types and instance counts based on the bill of materials for the user size. This reduces the number of fields you need to fill out on the Quick Start form. The Deploy Now links are how we were able to reduce the number of clicks to deploy for 50,000 users to just 14.\n\nThe Quick Start takes about two hours to deploy regardless of the size of instance you choose.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/s3ZaBXYG8nc\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## How you can deploy GitLab for any number of users in a couple of hours\n\nThe YouTube playlist [Learning to provision the AWS Quick Start for GitLab on EKS](https://youtube.com/playlist?list=PL05JrBw4t0Koi8VBnoVhmj_MstnbJjGw5) walks you through:\n\n1. [GitLab Reference Architectures, performance testing, cloud native hybrid and what is Gitaly](https://www.youtube.com/watch?v=1TYLv2xLkZY&list=PL05JrBw4t0Koi8VBnoVhmj_MstnbJjGw5&index=1&t=399s) (11mins)\n2. [An overview of GitLab AWS implementation patterns](https://www.youtube.com/watch?v=_x3I1aq7fog&list=PL05JrBw4t0Koi8VBnoVhmj_MstnbJjGw5&index=2) (13mins)\n3. [An overview of AWS Quick Start for cloud native hybrid on EKS](https://www.youtube.com/watch?v=XHg6m6fJjRY&list=PL05JrBw4t0Koi8VBnoVhmj_MstnbJjGw5&index=3&t=8s) (9mins)\n4. [Provisioning Ready-To-Run GitLab for 50,000 users in 14 clicks and a long lunch](https://www.youtube.com/watch?v=s3ZaBXYG8nc&list=PL05JrBw4t0Koi8VBnoVhmj_MstnbJjGw5&index=4&t=798s) (21mins) - same as above video.\n5. 
[Easy performance testing an AWS Quick Start-provisioned GitLab cloud native hybrid instance](https://www.youtube.com/watch?v=QpkF1vXXCjk&list=PL05JrBw4t0Koi8VBnoVhmj_MstnbJjGw5&index=5&t=510s) (32mins)\n\nIf you would like help getting started with GitLab instance provisioning on AWS, please contact your GitLab account team or reach out to [GitLab Sales](https://about.gitlab.com/sales/)!\n",[1001,231,9],"cloud native",{"slug":1003,"featured":6,"template":690},"gitlab-cnh-for-50k-users","content:en-us:blog:gitlab-cnh-for-50k-users.yml","Gitlab Cnh For 50k Users","en-us/blog/gitlab-cnh-for-50k-users.yml","en-us/blog/gitlab-cnh-for-50k-users",{"_path":1009,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1010,"content":1016,"config":1021,"_id":1023,"_type":14,"title":1024,"_source":16,"_file":1025,"_stem":1026,"_extension":19},"/en-us/blog/gitlab-duo-amazon-q-transform-ideas-into-code-in-minutes",{"title":1011,"description":1012,"ogTitle":1011,"ogDescription":1012,"noIndex":6,"ogImage":1013,"ogUrl":1014,"ogSiteName":673,"ogType":674,"canonicalUrls":1014,"schema":1015},"GitLab Duo + Amazon Q: Transform ideas into code in minutes","The new GitLab Duo with Amazon Q integration analyzes your issue descriptions and automatically generates complete working code solutions, accelerating development workflows.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097127/Blog/Hero%20Images/Blog/Hero%20Images/Screenshot%202024-11-27%20at%204.55.28%E2%80%AFPM_4VVz6DgGBOvbGY8BUmd068_1750097126673.png","https://about.gitlab.com/blog/gitlab-duo-amazon-q-transform-ideas-into-code-in-minutes","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab Duo + Amazon Q: Transform ideas into code in minutes\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Cesar Saavedra\"}],\n        \"datePublished\": \"2025-04-28\",\n      }",{"title":1011,"description":1012,"authors":1017,"heroImage":1013,"date":1018,"body":1019,"category":681,"tags":1020},[678],"2025-04-28","Have you ever spent days or even weeks converting a complex issue into working code? We've all been there. You start with a solid idea and a clear set of requirements, but the path from that initial concept to deployable code can be frustratingly long. Your productivity gets bogged down in implementation details, and projects that should move quickly end up dragging on.\n\nThis is where the power of [agentic AI](https://about.gitlab.com/topics/agentic-ai/) capabilities comes in. [GitLab Duo with Amazon Q](https://about.gitlab.com/blog/gitlab-duo-with-amazon-q-agentic-ai-optimized-for-aws/), which combines the comprehensive AI-powered DevSecOps platform with the deepest set of cloud computing capabilities, is designed to dramatically accelerate your application development process, all within your familiar GitLab workflow. By streamlining your path from idea to deployment, this powerful integration can propose implementation solutions based on your issue descriptions alone – transforming what used to take days into something that happens in minutes.\n\n## How it works: From issue to working code\n\nLet's walk through how this agentic AI feature works in practice. Imagine you're a developer tasked with creating a mortgage calculator application. Here's how GitLab Duo with Amazon Q helps you get it done:\n\n1. **Create an issue with detailed requirements:** Start by creating a standard [GitLab issue](https://docs.gitlab.com/user/project/issues/). 
In the description, you'll provide a comprehensive list of requirements that your service needs to meet. This becomes the blueprint for your solution.\n\n2. **Invoke Amazon Q with a quick action:** Once your issue is created, simply add a comment with a quick action, “/q dev”, to invoke Amazon Q. This is where the magic begins. \n\n3. **Let AI generate your implementation:** GitLab Duo with Amazon Q analyzes the issue description you've provided and the context of your source code, then autonomously generates code that meets all your stated requirements. It doesn't stop there – it actually commits those changes in a merge request, ready for your review.\n\n![GitLab Duo  with Amazon Q activity pop-up screenshot](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097156/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750097156018.png)\n\n4. **Review the generated application**: Navigate into the merge request to review the generated code. You can verify that all your requirements have been met and make any necessary adjustments.\n\n5. **Test the proposed application**: Finally, check that the application runs successfully. With minimal effort on your part, you now have working code that implements your original requirements.\n\n## Improve your development process\n\nGitLab Duo with Amazon Q completely transforms this process, including dramatically decreasing the time it takes to carry out complex developer tasks, through intelligent automation. By leveraging an agentic AI approach, you can accelerate your path from idea to deployment, freeing development teams to focus on more strategic work.\n\nWith GitLab Duo and Amazon Q, you'll develop software faster, more efficiently, and with less manual coding effort. This integration helps you:\n\n* **Save valuable development time** by automating implementation based on requirements  \n* **Maintain consistency** in code generation across your projects  \n* **Reduce the cognitive load** of translating requirements into working code  \n* **Accelerate your release cycles** by removing implementation bottlenecks  \n* **Focus your expertise** on reviewing and optimizing, rather than writing boilerplate code\n\nReady to see GitLab Duo with Amazon Q in action? 
Watch our demo video to discover how you can transform your development workflow today.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/jxxzNst3jpo?si=j_LQdZhUnwqoQEst\" title=\"GitLab Duo with Amazon Q demo video for dev workflow\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n> To learn more about GitLab Duo with Amazon Q visit us at an upcoming [AWS Summit in a city near you](https://about.gitlab.com/events/aws-summits/) or [reach out to your GitLab representative](https://about.gitlab.com/partners/technology-partners/aws/#form).\n\n## GitLab Duo with Amazon Q resources\n\n- [GitLab Duo with Amazon Q: Agentic AI optimized for AWS generally available](https://about.gitlab.com/blog/gitlab-duo-with-amazon-q-agentic-ai-optimized-for-aws/)\n- [GitLab and AWS partner page](https://about.gitlab.com/partners/technology-partners/aws/)\n- [GitLab Duo with Amazon Q documentation](https://docs.gitlab.com/user/duo_amazon_q/)",[683,9,687,482,685,231],{"slug":1022,"featured":91,"template":690},"gitlab-duo-amazon-q-transform-ideas-into-code-in-minutes","content:en-us:blog:gitlab-duo-amazon-q-transform-ideas-into-code-in-minutes.yml","Gitlab Duo Amazon Q Transform Ideas Into Code In Minutes","en-us/blog/gitlab-duo-amazon-q-transform-ideas-into-code-in-minutes.yml","en-us/blog/gitlab-duo-amazon-q-transform-ideas-into-code-in-minutes",{"_path":1028,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1029,"content":1034,"config":1040,"_id":1042,"_type":14,"title":1043,"_source":16,"_file":1044,"_stem":1045,"_extension":19},"/en-us/blog/gitlab-duo-with-amazon-q-agentic-ai-optimized-for-aws",{"title":1030,"description":1031,"ogTitle":1030,"ogDescription":1031,"noIndex":6,"ogImage":829,"ogUrl":1032,"ogSiteName":673,"ogType":674,"canonicalUrls":1032,"schema":1033},"GitLab Duo with Amazon Q: Agentic AI optimized for AWS generally available","The comprehensive AI-powered DevSecOps platform combined with the deepest set of cloud computing capabilities speeds dev cycles, increases automation, and improves code quality.","https://about.gitlab.com/blog/gitlab-duo-with-amazon-q-agentic-ai-optimized-for-aws","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab Duo with Amazon Q: Agentic AI optimized for AWS generally available\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Emilio Salvador\"}],\n        \"datePublished\": \"2025-04-17\",\n      }",{"title":1030,"description":1031,"authors":1035,"heroImage":829,"date":1037,"body":1038,"category":681,"tags":1039},[1036],"Emilio Salvador","2025-04-17","Today, we're excited to announce the general availability of [GitLab Duo with Amazon Q](https://about.gitlab.com/partners/technology-partners/aws/), delivering agentic AI throughout the software development lifecycle for AWS customers. GitLab Duo with Amazon Q, based on GitLab Ultimate, includes many familiar features such as code completion, code explanation, code generation, chat, and vulnerability explanation and resolution – all of which are now powered by Amazon Q. It is available with a Self-Managed deployment model for customers on Amazon Web Services (AWS).\n\nWith Amazon Q's agents directly embedded into GitLab's DevSecOps platform, developers maintain their familiar development environment while gaining powerful AI capabilities. 
The result is a frictionless experience that helps accelerate development cycles, reduce manual effort, and enhance code quality.\n\n“Participating in the early access program for GitLab Duo with Amazon Q has given us a glimpse into its transformative potential for our development workflows,” said Osmar Alonso, DevOps Engineer, Volkswagen Digital Solutions. “Even in its early stages, we saw how the deeper integration with autonomous agents could streamline our process, from code commit to production. We're excited to see how this technology empowers our team to focus on innovation and accelerate our digital transformation.\"\n\n## Agentic AI comes to complex customer environments\n\nBy combining agentic AI with secure, reliable cloud infrastructure, GitLab and AWS bring built-in security, scale, and reliability to complex customer environments, enabling them to realize the following benefits:\n\n__Unified developer experience for streamlined development__\n\nDevelopers can interact with Amazon Q through the GitLab Duo Chat interface from their preferred IDE or the GitLab web interface. This eliminates the need for context switching in other tools and helps developers stay focused on the project that they’re working on.\n\n__One solution for the entire software development lifecycle__\n\nCode suggestions and optimizations leverage AWS-specific patterns and practices, while testing tools understand AWS service interactions and dependencies. A common data store across all stages provides essential context to AI agents, enabling complete visibility and traceability for relevant actions.\n\n__Secure development with enterprise-grade guardrails__\n\nEnd-to-end security and compliance are built directly into the development platform with guardrails that help reduce risk without impeding velocity. This secure software development approach enforces transparency and auditability through AI agents while seamlessly integrating with AWS security services and compliance frameworks.\n\n## How to start using GitLab Duo with Amazon Q\n\nHere are five initial use cases we’re targeting to help teams build secure software faster with agentic AI: \n\n1. **Feature development acceleration** - Create issue descriptions, generate implementation plans based on your existing codebase, and produce complete merge requests ready for review. This drives feature delivery acceleration while maintaining consistency with internal development standards.  \n2. **Legacy application modernization** - Analyze your legacy Java codebase, create a comprehensive upgrade plan, and generate a merge request with all necessary code changes. This unlocks faster Java upgrade time, while providing a clear audit trail of all code transformations. Support for .NET and other languages is planned for future releases.  \n3. **Quality assurance enhancement** - Analyze code and automatically create comprehensive unit tests that understand your application logic and AWS service interactions. This increases test coverage, reduces manual test writing effort, and helps ensure consistent test quality across applications.  \n4. **Code review optimization** - Provide inline feedback on code changes, suggesting improvements based on development standards, highlighting security and performance considerations. This enables reduced code review cycles and delivery of higher-quality code merges for deployment.  \n5. 
**Vulnerability remediation** - Explain detected vulnerabilities in clear, detailed terms and offer one-click remediation based on recommended code changes, helping to significantly reduce the time from detection to remediation.\n\nWatch GitLab Duo with Amazon Q in action:\n\n\u003Cdiv style=\"padding:56.25% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1075753390?badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write; encrypted-media\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"Technical Demo: GitLab Duo with Amazon Q\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\n> #### Get the benefits of GitLab Duo with Amazon Q today\n> GitLab's unified, AI-powered DevSecOps platform with Amazon Q's advanced AI capabilities provides AWS customers with a solution that transforms how teams build and deploy software. To learn more about GitLab Duo with Amazon Q, visit us at an upcoming [AWS Summit in a city near you](https://about.gitlab.com/events/aws-summits/) or [reach out to your GitLab representative](https://about.gitlab.com/partners/technology-partners/aws/#form).",[683,482,9,685,686,729],{"slug":1041,"featured":91,"template":690},"gitlab-duo-with-amazon-q-agentic-ai-optimized-for-aws","content:en-us:blog:gitlab-duo-with-amazon-q-agentic-ai-optimized-for-aws.yml","Gitlab Duo With Amazon Q Agentic Ai Optimized For Aws","en-us/blog/gitlab-duo-with-amazon-q-agentic-ai-optimized-for-aws.yml","en-us/blog/gitlab-duo-with-amazon-q-agentic-ai-optimized-for-aws",{"_path":1047,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1048,"content":1053,"config":1058,"_id":1060,"_type":14,"title":1061,"_source":16,"_file":1062,"_stem":1063,"_extension":19},"/en-us/blog/gitlab-duo-with-amazon-q-devsecops-meets-agentic-ai",{"title":1049,"description":1050,"ogTitle":1049,"ogDescription":1050,"noIndex":6,"ogImage":829,"ogUrl":1051,"ogSiteName":673,"ogType":674,"canonicalUrls":1051,"schema":1052},"GitLab Duo with Amazon Q: DevSecOps meets agentic AI","AI-powered DevSecOps enhanced with autonomous AI agents accelerates developer productivity, application modernization, and innovation.","https://about.gitlab.com/blog/gitlab-duo-with-amazon-q-devsecops-meets-agentic-ai","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab Duo with Amazon Q: DevSecOps meets agentic AI\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Emilio Salvador\"}],\n        \"datePublished\": \"2024-12-03\",\n      }",{"title":1049,"description":1050,"authors":1054,"heroImage":829,"date":1055,"body":1056,"category":681,"tags":1057},[1036],"2024-12-03","We're excited to announce GitLab Duo with Amazon Q, a joint offering that brings together GitLab's comprehensive AI-powered DevSecOps platform with Amazon Q's autonomous AI agents in a single, integrated solution.\n\nGitLab Duo with Amazon Q transforms software development by integrating powerful AI agents directly into your daily workflows. Instead of switching between multiple tools, developers can now accelerate key tasks — from feature development to code reviews — all from within GitLab's comprehensive DevSecOps platform. 
Amazon Q’s AI agents act as intelligent assistants, automating time-consuming tasks like generating code from requirements, creating unit tests, conducting code reviews, and modernizing Java applications. By handling these complex tasks, this joint offering helps teams focus on innovation, while maintaining security and quality standards.\n\nThis enterprise-class developer experience includes:\n* The GitLab unified platform with one single data store, which automates the building, testing, packaging, and deployment of secure code\n* GitLab Duo, enhanced with Amazon Q developer, which leverages GitLab project context to generate multi-file changes based on the task\n* Amazon Q AI agents integrated with GitLab Duo, updating issues and creating merge requests per task, with permission scoped to the project\n\n\u003Cdiv style=\"padding:56.25% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1033653810?badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"GitLab Duo and Amazon Q\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\n## Partnership innovation: GitLab and AWS\n\nGitLab Duo with Amazon Q is the result of close collaboration between GitLab and AWS engineering teams, combining our strengths to transform software development. This partnership unites GitLab's expertise in unified DevSecOps with AWS's leadership in cloud computing, creating an innovative solution that understands how developers work.\n\nBy integrating Amazon Q's autonomous agents with GitLab's comprehensive AI-powered platform, we've built more than a technical integration. We've created an experience that makes AI-powered development feel natural and upholds the security, compliance, and reliability that enterprises require.\n\nIndustry analysts recognize the significance of this integration in advancing AI-powered software development:\n\n***\"With this joint offering, GitLab and AWS are combining their strengths to make agentic AI a reality in software development,\" said Katie Norton, Research Manager at IDC. \"GitLab Duo with Amazon Q addresses strong use cases and critical challenges that empower customers to harness the full potential of AI.\"***\n\n***\"Both developers and the organizations they work for are increasingly interested in simplified and unified experiences,\" says Rachel Stephens, senior analyst at RedMonk. \"Especially in the era of AI – when security and privacy are paramount concerns – organizations want to both harness the power of cutting edge technology while also controlling risk and minimizing disjointed software tool chains. The partnership between GitLab Duo and Amazon Q seeks to give developers the tools they need within the context of an end-to-end DevSecOps experience.\"***\n\n## 4 key customer benefits \n\nGitLab Duo with Amazon Q pairs AI-powered DevSecOps with the deepest set of cloud computing capabilities. Together, they help development teams:\n\n### 1. Streamline feature development from idea to code \n\nDevelopment teams often spend hours translating requirements into code, leading to slower delivery and inconsistent implementation. You can now invoke the GitLab Duo with Amazon Q agent by utilizing a new quick action `/q dev`, which will convert an issue description directly into merge-ready code in minutes. 
The agent analyzes requirements, plans the implementation, and generates a complete merge request — all while adhering to your team's development standards. Teams can iterate rapidly using feedback in comments, significantly reducing the time from idea to production.\n\n\u003Cdiv style=\"padding:56.25% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1034050110?badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"Feature Dev with Rev\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\n### 2. Modernize legacy code without the headache \n\nUpgrading Java applications traditionally requires weeks of careful planning, manual code changes, and extensive testing. By using quick action `/q transform`, you can change this by automating the entire Java modernization process. In minutes, not hours, the agent analyzes your Java 8 or 11 codebase, creates a comprehensive upgrade plan, and generates fully documented merge requests for Java 17 migration. Every change is tracked and traceable, giving teams confidence while improving application security and performance.\n\n\u003Cdiv style=\"padding:56.25% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1034050145?badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"QCT\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\n### 3. Accelerate code reviews without sacrificing quality \n\nCode reviews often create bottlenecks: Teams wait days for feedback yet must maintain consistent standards. With the `/q review` quick action, you get instant, intelligent feedback on code quality and security directly in merge requests. By automatically identifying potential issues and suggesting improvements based on your standards, teams can maintain high-quality code while dramatically reducing review cycles.\n\n\u003Cdiv style=\"padding:56.25% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1034050136?badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"Code Reviews\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\n### 4. Automate testing to ship with confidence\n\nManual test creation is time-consuming and often leads to inconsistent coverage across teams. With the `/q test` quick action, you can automatically generate comprehensive unit tests that understand your application logic. The agent ensures thorough coverage of critical paths and edge cases, matching your existing testing patterns. 
This automation helps teams catch issues earlier and maintain consistent quality standards, saving valuable developer time.\n\n\u003Cdiv style=\"padding:54.37% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1034050181?badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"Use GitLab Duo with Amazon Q to add tests\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\n## Enterprise-grade security and guardrails included\n\nBuilt for enterprise scale and security, this offering combines GitLab's integrated security, compliance, and privacy with Amazon Q's AI agent, accelerating developer workflows to help organizations ship secure software faster.\n\nThe integration features:\n\n* Built-in guardrails that maintain development velocity  \n* Granular controls for AI-powered features at user, project, and group levels  \n* End-to-end security integration with existing workflows\n\nDevSecOps teams can securely scale the development environment with the world's most broadly adopted cloud.\n\n## What's next\n\nGitLab Duo with Amazon Q builds on our existing integration with [AWS announced in May 2024](https://press.aboutamazon.com/2024/4/aws-announces-general-availability-of-amazon-q-the-most-capable-generative-ai-powered-assistant-for-accelerating-software-development-and-leveraging-companies-internal-data), representing a significant step forward in our joint mission to transform software development. This deeper integration of AI capabilities marks the beginning of our expanded collaboration with AWS. As we continue to evolve these capabilities, we'll focus on:\n\n* Extending AI features across the development lifecycle  \n* Enhancing developer productivity  \n* Meeting enterprise development demands at scale\n\n**GitLab Duo with Amazon Q is available today on a [public branch](https://gitlab.com/groups/gitlab-org/-/epics/16059) in the GitLab.org project. 
To get access to a preview and learn more about how it can transform your software development process, visit [our website](https://about.gitlab.com/partners/technology-partners/aws/#interest).**",[729,9,683,482,231],{"slug":1059,"featured":91,"template":690},"gitlab-duo-with-amazon-q-devsecops-meets-agentic-ai","content:en-us:blog:gitlab-duo-with-amazon-q-devsecops-meets-agentic-ai.yml","Gitlab Duo With Amazon Q Devsecops Meets Agentic Ai","en-us/blog/gitlab-duo-with-amazon-q-devsecops-meets-agentic-ai.yml","en-us/blog/gitlab-duo-with-amazon-q-devsecops-meets-agentic-ai",{"_path":1065,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1066,"content":1072,"config":1077,"_id":1079,"_type":14,"title":1080,"_source":16,"_file":1081,"_stem":1082,"_extension":19},"/en-us/blog/gitlab-is-now-available-as-an-aws-codestar-connections-provider",{"title":1067,"description":1068,"ogTitle":1067,"ogDescription":1068,"noIndex":6,"ogImage":1069,"ogUrl":1070,"ogSiteName":673,"ogType":674,"canonicalUrls":1070,"schema":1071},"GitLab is now available as an AWS CodeStar Connections provider","AWS released native CodePipeline integration for GitLab projects and repos, helping to ensure a best-in-class experience when using GitLab and AWS together.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098884/Blog/Hero%20Images/Blog/Hero%20Images/AdobeStock_397632156_3Ldy1urjMStQCl4qnOBvE0_1750098884409.jpg","https://about.gitlab.com/blog/gitlab-is-now-available-as-an-aws-codestar-connections-provider","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab is now available as an AWS CodeStar Connections provider\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Darwin Sanoy\"}],\n        \"datePublished\": \"2024-01-11\",\n      }",{"title":1067,"description":1068,"authors":1073,"heroImage":1069,"date":1074,"body":1075,"category":773,"tags":1076},[705],"2024-01-11","The GitLab DevSecOps Platform now integrates natively with many AWS services through AWS CodeStar Connections and AWS CodePipeline. This long-awaited integration was recently completed by the AWS CodeSuite service team for GitLab.com SaaS, GitLab Self-Managed, and GitLab Dedicated. 
AWS CodeStar Connections is a utility layer, which means other AWS services can enable native GitLab integration with less work.\n\nOnce created, CodeStar Connections objects can be used directly to integrate with many AWS services such as:\n- AWS CodePipeline\n- Amazon CodeWhisperer Customization Capability\n- AWS Service Catalog\n- AWS Glue\n\nWhen a CodeStar Connection is used to configure AWS CodePipeline for GitLab, it can further support:\n- AWS CodeBuild\n- Amazon SageMaker MLOps Projects\n- AWS CodeDeploy\n\nGitLab and AWS have been working at ever deeper levels of technical and business integration to ensure that our co-customers have a best-in-class experience when using GitLab and AWS together.\n\n![AWS CodeStar integration](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098901/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750098900704.png)\n\nCheck out the complete list of AWS services that are now directly accessible in the [GitLab AWS Integration Index documentation](https://docs.gitlab.com/ee/solutions/cloud/aws/gitlab_aws_integration.html).\n\n![CodeStar - New Technology and Solutions for using GitLab and AWS Together ](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098901/Blog/Content%20Images/Blog/Content%20Images/AWS_re_Invent_2023__New_Technology_and_Solutions_for_using_GitLab_and_AWS_Together__4__aHR0cHM6_1750098900705.png)\n\n## Resources\n\n- GitLab [AWS Integration Index documentation](https://docs.gitlab.com/ee/solutions/cloud/aws/gitlab_aws_integration.html) is a one-stop location for these new integrations as well as existing integrations\n- AWS documentation for [setting up CodeStar Connections with GitLab.com SaaS](https://docs.aws.amazon.com/codepipeline/latest/userguide/connections-gitlab-managed.html)\n- AWS documentation for [setting up CodeStar Connections with self-managed GitLab](https://docs.aws.amazon.com/codepipeline/latest/userguide/connections-gitlab-managed.html)\n- AWS documentation for [configuring AWS CodePipeline integration](https://docs.gitlab.com/ee/user/project/integrations/aws_codepipeline.html)\n- [AWS announcement for GitLab CodePipeline Integration for GitLab SaaS](https://aws.amazon.com/about-aws/whats-new/2023/08/aws-codepipeline-supports-gitlab/) and [AWS announcement for GitLab Self-Managed](https://aws.amazon.com/about-aws/whats-new/2023/12/codepipeline-gitlab-self-managed/)\n\n![codestar-amazonpartnerlogo](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098901/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750098900705.png)\n",[9,109,282,231],{"slug":1078,"featured":6,"template":690},"gitlab-is-now-available-as-an-aws-codestar-connections-provider","content:en-us:blog:gitlab-is-now-available-as-an-aws-codestar-connections-provider.yml","Gitlab Is Now Available As An Aws Codestar Connections Provider","en-us/blog/gitlab-is-now-available-as-an-aws-codestar-connections-provider.yml","en-us/blog/gitlab-is-now-available-as-an-aws-codestar-connections-provider",{"_path":1084,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1085,"content":1091,"config":1097,"_id":1099,"_type":14,"title":1100,"_source":16,"_file":1101,"_stem":1102,"_extension":19},"/en-us/blog/little-things-make-a-difference",{"title":1086,"description":1087,"ogTitle":1086,"ogDescription":1087,"noIndex":6,"ogImage":1088,"ogUrl":1089,"ogSiteName":673,"ogType":674,"canonicalUrls":1089,"schema":1090},"Little things make a difference","Let's celebrate the small UI refinements that add up to 
create a big impact","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749669673/Blog/Hero%20Images/engineering.png","https://about.gitlab.com/blog/little-things-make-a-difference","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Little things make a difference\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Christie Lenneville\"}],\n        \"datePublished\": \"2021-02-12\",\n      }",{"title":1086,"description":1087,"authors":1092,"heroImage":1088,"date":1094,"body":1095,"category":959,"tags":1096},[1093],"Christie Lenneville","2021-02-12","\n\n{::options parse_block_html=\"true\" /}\n\n\n\nWhen you're busy focusing on the big picture of feature improvement work, it can be easy to forget the value of tiny refinements. But when you add them all up, fixing little \"paper cuts\" can have a meaningful impact on user experience. \n\nThat's why I was so excited to see the [GitLab UI Polish Gallery](https://nicolasdular.gitlab.io/gitlab-polish-gallery/) created by [Nicolas Dular](https://gitlab.com/nicolasdular), a Senior Fullstack Engineer on our Growth team. It highlights small refinement contributions &#151; like adjusting alignment, spacing, and type scale &#151; that are easy to overlook. But seeing them in aggregate, you quickly realize what a difference they make.\n\nFor me, the most inspiring part of the gallery was seeing such a diverse group of people contribute to making our product the best it can be. Developers, designers, and members of the wider GitLab community (special shout out to [Yogi](https://gitlab.com/yo)) all care enough about our product experience to put time into small changes.\n\nHere are a few examples, but I encourage you to check out the gallery for yourself!\n\n## Polishing the Jira Connect app\n\nOur [Jira Connect](https://marketplace.atlassian.com/apps/1221011/gitlab-com-for-jira-cloud?hosting=cloud&tab=overview) app helps customers use GitLab in coordination with Jira for a more seamless developer experience. [Libor Vanc](https://gitlab.com/lvanc) (Senior Product Designer) and [Justin Ho](https://gitlab.com/justin_ho) (Senior Frontend Engineer) on our Ecosystem team made some light changes to the app's type scale and CTAs that make the app much simpler to visually parse. What a nice change!\n\n![GitLab Jira Connect app](https://about.gitlab.com/images/blogimages/little-things-make-a-difference/jira-connect-gitlab.png)\n\n## Addressing alignment problems in the merge request widget\n\nMerge requests are central to our user experience, and we're working hard to make the experience exceptional. When Staff Product Designer, [Pedro Moreira da Silva](https://gitlab.com/pedroms), noticed alignment problems in the MR widget, he worked with Senior Frontend Engineer, [Jacques Erasmus](https://gitlab.com/jerasmus), to address them. It was a very subtle change that will impact millions of users.\n\n![Reply box in diffs](https://about.gitlab.com/images/blogimages/little-things-make-a-difference/widget-alignment.png)\n\n## Fixing the vertical alignment in card headers\n\nThis change is so subtle that it's hard to even notice, but the vertical alignment in the card header of our on-demand security scans was off by mere pixels. 
Product Designer, [Annabel Dunstone Gray](https://gitlab.com/annabeldunstone), noticed the [problem](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/50550#note_480509692) during an MR review, and Frontend Engineer, [Paul Gascou Vaillancourt](https://gitlab.com/pgascouvaillancourt), jumped in to fix it in the same release.\n\n![Card header vertical alignment](https://about.gitlab.com/images/blogimages/little-things-make-a-difference/card-header.png)\n\n## More to come!\n\nWe make visual refinements all of the time, so this is just a start to what you'll see in the [GitLab UI Polish Gallery](https://nicolasdular.gitlab.io/gitlab-polish-gallery/). I'll personally be checking in from time to time to remind myself of the little things that make a big difference.\n\n",[732,9],{"slug":1098,"featured":6,"template":690},"little-things-make-a-difference","content:en-us:blog:little-things-make-a-difference.yml","Little Things Make A Difference","en-us/blog/little-things-make-a-difference.yml","en-us/blog/little-things-make-a-difference",{"_path":1104,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1105,"content":1111,"config":1118,"_id":1120,"_type":14,"title":1121,"_source":16,"_file":1122,"_stem":1123,"_extension":19},"/en-us/blog/lockheed-martin-aws-gitlab",{"title":1106,"description":1107,"ogTitle":1106,"ogDescription":1107,"noIndex":6,"ogImage":1108,"ogUrl":1109,"ogSiteName":673,"ogType":674,"canonicalUrls":1109,"schema":1110},"GitLab, AWS help strengthen Lockheed Martin’s digital transformation","Lockheed Martin’s software factory selected GitLab’s DevSecOps Platform, along with AWS, to streamline toolchains, increase collaboration, and more.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668830/Blog/Hero%20Images/lockheed-martin-cover-2.jpg","https://about.gitlab.com/blog/lockheed-martin-aws-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab, AWS help strengthen Lockheed Martin’s digital transformation\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"GitLab\"}],\n        \"datePublished\": \"2023-05-16\",\n      }",{"title":1106,"description":1107,"authors":1112,"heroImage":1108,"date":1114,"body":1115,"category":773,"tags":1116},[1113],"GitLab","2023-05-16","\nLockheed Martin launched its 1LMX initiative to transform its end-to-end business processes and systems. One focus of the transformation was to pare down the company’s wide variety of DevOps tools – each program or product line at Lockheed Martin had its own toolchain. To mitigate this issue, drive rapid production, and increase collaboration, Lockheed Martin adopted GitLab’s DevSecOps Platform, run on AWS.\n\n“GitLab has strengthened our 1LMX transformation, upgrading the way we collaborate and innovate to develop software. Now, all of our programs have access to a high-quality software development environment,” said Alan Hohn, Lockheed Martin’s Director of Software Strategy.\n\nGitLab’s DevSecOps Platform enables Lockheed Martin to ship software more efficiently and securely for thousands of their programs, ranging from satellite platforms and aerospace systems to ground control software and maritime surface and subsurface software.\n\nHere are some top-level benefits that Lockheed Martin has seen with GitLab’s DevSecOps Platform:\n* Using GitLab’s single platform, Lockheed Martin’s legacy projects are delivered to testing every six days, down from a monthly cadence using distributed toolchains.   
\n* Developers experienced a 90% reduction in time spent on system maintenance.\n* The organization has seen 200% annual growth in adoption of The DevSecOps Platform.\n* AWS enabled automated Infrastructure as Code for a scalable and resilient cloud architecture.\n\n## Efficiency gains\n\nIn migrating to GitLab, Lockheed Martin has realized a number of benefits and eliminated obstacles. In three and a half years, Lockheed Martin has created 64,000 projects on GitLab and creates 110,000 continuous integration builds daily. \n\nAdditionally, they were able to retire thousands of separately maintained servers, thereby reducing time spent on maintenance by 90%. GitLab further enables internal efficiency within the organization by allowing teams to securely share reusable code components in globally accessible environments. Since implementing GitLab, Lockheed Martin teams have added 18 new repositories a day for the past two years. \n\n## How GitLab, AWS, and Lockheed Martin work together\n\nIn 2022, after rapid adoption of GitLab created the need for a more scalable solution, Lockheed Martin, GitLab, and AWS worked together to automate and optimize Lockheed Martin's code deployment across the enterprise. \n\nThe solution started with a well-architected review of the design between Lockheed Martin, AWS, and GitLab. AWS then helped to automate and optimize the Lockheed Martin GitLab deployment for a continuous integration and continuous delivery (CI/CD) environment by delivering Infrastructure as Code to deploy the environment in two hours vs. several hours previously. \n\nThe AWS team also established workflows to deliver a fully automated, highly available, disaster recovery-compliant, scalable architecture for GitLab, enabling a consistent process that runs without manual intervention.\n\nAWS supported load balancing to auto-scale the deployment process based on developer demand for pipeline runs and user traffic so that developers are not waiting on their deployments to execute. Pre-migration testing was performed to establish baselines, followed by post-migration testing to measure performance and scalability gains in delivering faster deployments. \n\nAdditionally, monitoring and security controls were implemented to comply with Lockheed Martin policies. As a result, the team was able to deliver operational efficiencies, with the number of build requests waiting to be processed decreasing from 200 to zero, and to reduce the time for code deployment across the enterprise.\n\nThis effort showcased how large enterprises with thousands of software developers can build and deploy automated, scalable, and resilient code pipelines in the cloud using platforms such as GitLab by leveraging AWS best practices.\n\nGitLab’s Chief Product Officer David DeSanto added, “For more than a century, Lockheed Martin has set the standard for innovation within the public sector, and demonstrates what is possible when organizations invest in digital transformation efforts.”\n\nLockheed Martin has 20,000 GitLab users, and is looking to double that number and migrate even more of their projects over to The DevSecOps Platform in the coming years. 
To dig deeper into how Lockheed Martin uses GitLab, read [our case study](/customers/lockheed-martin), and to learn more about GitLab for the Public Sector, visit [our site](/solutions/public-sector/).\n",[1117,9,915],"customers",{"slug":1119,"featured":6,"template":690},"lockheed-martin-aws-gitlab","content:en-us:blog:lockheed-martin-aws-gitlab.yml","Lockheed Martin Aws Gitlab","en-us/blog/lockheed-martin-aws-gitlab.yml","en-us/blog/lockheed-martin-aws-gitlab",{"_path":1125,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1126,"content":1132,"config":1138,"_id":1140,"_type":14,"title":1141,"_source":16,"_file":1142,"_stem":1143,"_extension":19},"/en-us/blog/microcks-and-gitlab-part-one",{"title":1127,"description":1128,"ogTitle":1127,"ogDescription":1128,"noIndex":6,"ogImage":1129,"ogUrl":1130,"ogSiteName":673,"ogType":674,"canonicalUrls":1130,"schema":1131},"Speed up API and microservices delivery with Microcks and GitLab - Part 1","Learn how to configure Microcks for GitLab and what the use cases are for this open source Kubernetes-native tool.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749683021/Blog/Hero%20Images/lightsticks.png","https://about.gitlab.com/blog/microcks-and-gitlab-part-one","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Speed up API and microservices delivery with Microcks and GitLab - Part 1\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Madou Coulibaly\"}],\n        \"datePublished\": \"2023-09-27\",\n      }",{"title":1127,"description":1128,"authors":1133,"heroImage":1129,"date":1135,"body":1136,"category":708,"tags":1137},[1134],"Madou Coulibaly","2023-09-27","\n\nAPI development is all the rage these days for customer and partner integration, frontend-to-backend communication, microservices orchestration, and more. Yet APIs have their challenges, including how to create a fast feedback loop on design, how different teams can work with autonomy without having to wait for each other's API implementation, and how to cope with backward compatibility tests when shipping newer versions of the API. \n\n[Microcks](https://microcks.io), an open source, Kubernetes-native tool for API mocking and testing, addresses these challenges. With Microcks, which is accepted as a Sandbox project in the [Cloud Native Computing Foundation](https://cncf.io), developers can leverage their [OpenAPI](https://www.openapis.org/), [GraphQL](https://graphql.org/), [gRPC](https://grpc.io/), [AsyncAPI](https://www.asyncapi.com/), and [Postman Collection](https://www.postman.com/collection/) assets to quickly mock and simulate APIs before writing them. Couple Microcks with GitLab and you have a powerful combination to foster collaboration, encourage rapid changes, and provide a robust delivery platform for API-based applications.\n\nIn this ongoing blog series, we will introduce you to Microcks use cases and how they fit with the GitLab platform. We'll also discuss technical integration points that will help ease the developer burden, including identity management, Git repositories, and pipeline integrations.\n\n## What is Microcks?\nMicrocks addresses two major use cases: \n- **Simulating (or mocking) an API or a microservice** from a set of descriptive assets. 
This can be done as soon as you start the design phase to set up a feedback loop very quickly, or later on to ease the pain of provisioning environments with a lot of dependencies.\n- **Validating the conformance of your application regarding your API specification** by running contract tests. This validation can be integrated into your CI/CD pipeline so that conformance can be checked on each and every iteration. This is of great help to enforce backward compatibility of your API or microservices interfaces.\n\nMicrocks offers a uniform and consistent approach for the various kinds of request/response APIs (REST, GraphQL, gRPC, SOAP) and event-driven APIs (currently supporting eight different protocols), thereby bringing consistency for users and for automations all along your API lifecycle.\n\n## How Microcks fits into the software development lifecycle\nMicrocks is a solution based on containers and can be deployed in several configurations. It can be deployed on the developer laptop through [Docker](https://microcks.io/documentation/installing/docker-compose/), [Podman](https://microcks.io/documentation/installing/podman-compose/) or [Docker Desktop Extension](https://microcks.io/documentation/installing/docker-desktop-extension/) to assist with mocking complex environments. When it comes to team collaboration, Microcks can be deployed as a centralized instance that connects to the Git repositories of the organization, discovers the API artifacts, and then provides shared up-to-date API simulations.\n\n![diagram of how Microcks fits into development lifecycle](https://about.gitlab.com/images/blogimages/2023-09-27-microcks-and-gitlab-part-1-speed-up-api-and-microservices-delivery/microcks.png){: .shadow.small.center}\n\nTo ease the burden on developers (and administrators), Microcks can be configured to use your GitLab platform as an identity provider. With that configuration, integrating Microcks is seamless, and API simulations are automatically shared among development teams. Microcks fosters collaboration by providing everyone with the same “source of truth” and avoiding drift risks. The tool can also be used to lower the pain and the cost of deploying and maintaining complex QA environments because simulations are inexpensive to deploy or redeploy on demand. Microcks deployment follows a GitOps approach.\n\nBeyond this sharing of simulations, Microcks also integrates well with CI/CD pipelines. As you release API-based applications, there is always concern about conformance to the contractualized expectations you defined using specifications like OpenAPI, GraphQL, and the like. Usually, the hardest part isn't delivering the `1.0` of this API; problems come later when you're trying to deliver the `1.3`. This latest version must still be backward compatible with the `1.0` contract if you don't want to make your consumers angry and frustrated.\n\nThis conformance validation is very well assured by Microcks using contract-testing principles. So we encourage you to plug Microcks into some `test`-related jobs in your GitLab pipeline and delegate this conformance validation to your Microcks instance.\n\n![microcks-in-gitlab-workflow](https://about.gitlab.com/images/blogimages/2023-09-27-microcks-and-gitlab-part-1-speed-up-api-and-microservices-delivery/microcks-in-gitlab-workflow.png){: .shadow.medium.center}\n\n\nEmbedding Microcks conformance testing in your pipeline is actually easy thanks to our lightweight CLI that you'll integrate in pipeline jobs. 
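\n\nAs an illustration of what such a job can look like, here is a minimal, hypothetical sketch of a `.gitlab-ci.yml` job that delegates contract testing to the Microcks CLI. The image tag, API name and version, test endpoint, runner type, and CI/CD variables are placeholders to adapt to your own setup, and the exact flags should be double-checked against the microcks-cli documentation for the version you use:\n\n```yaml\n# Illustrative sketch only: run a Microcks contract test from a GitLab CI job.\n# All names, URLs, and variables below are placeholders, not a prescribed setup.\nmicrocks-contract-test:\n  stage: test\n  image:\n    name: quay.io/microcks/microcks-cli:latest\n    entrypoint: [\"\"]\n  script:\n    - >\n      microcks-cli test 'Order API:1.0'\n      http://order-api.staging.example.com/api\n      OPEN_API_SCHEMA\n      --microcksURL=https://microcks.example.com/api/\n      --keycloakClientId=$MICROCKS_CLIENT_ID\n      --keycloakClientSecret=$MICROCKS_CLIENT_SECRET\n      --waitFor=10sec\n```\n\nBecause the CLI exits with a non-zero status when the test does not pass, the job fails and the pipeline can block a release that breaks the contract.\n\n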
You can choose to reuse an existing Microcks instance to record results and keep a history of your successes, or spin up a new ephemeral instance, as it's lightweight and fast to bootstrap.\n\n## How to set up GitLab as an identity provider in Microcks\n\nTo start off this series, we will detail how to configure Microcks to use your GitLab platform as an identity provider. This is in fact very easy, as authentication in Microcks is based on [Keycloak](https://keycloak.org) (another CNCF project) and GitLab can be set as an identity provider in Keycloak (see [official documentation](https://www.keycloak.org/docs/latest/server_admin/index.html#gitlab)).\n\n**Note:** This configuration is optional, as Microcks can use any other identity provider Keycloak integrates with.\n\nKeycloak is a very common solution that may already be deployed at your organization. If not, Microcks comes with a Keycloak distribution that is pre-configured for its usage with a realm called `microcks`. We have used this realm to validate this configuration.\n\n### Create a GitLab Group Application\nThe first thing is to create a new [Group Application](https://docs.gitlab.com/ee/integration/oauth_provider.html#create-a-group-owned-application) on your GitLab instance as follows:\n- `Name`: `microcks-via-keycloak`\n- `Redirect URI`: `https://keycloak.acme.org/realms/microcks/broker/gitlab/endpoint`\n- `Scopes`: `read_user`, `openid`, `profile`, and `email`\n\n![gitlab-application-form](https://about.gitlab.com/images/blogimages/2023-09-27-microcks-and-gitlab-part-1-speed-up-api-and-microservices-delivery/gitlab-application-form.png){: .shadow.medium.center}\n\n\nThis application uses your Keycloak instance with `https://keycloak.acme.org/realms/microcks/broker/gitlab/endpoint` as the redirect URI. As a result, we obtain an `Application ID` and an associated `Secret` we have to keep aside for the next step.\n\n![gitlab-application](https://about.gitlab.com/images/blogimages/2023-09-27-microcks-and-gitlab-part-1-speed-up-api-and-microservices-delivery/gitlab-application.jpeg){: .shadow.medium.center}\n\n\n### Add GitLab as identity provider in Keycloak\nThe next step takes place in the Keycloak admin console. Once the correct `microcks` realm is selected, you'll just have to go to the **Identity providers** section and add a GitLab provider. Simply paste the `Application ID` you got earlier as the `Client ID` and the `Secret` as the `Client Secret`. You can also choose a `Display order` if you plan to have multiple identity providers.\n\n![keycloak-identity-provider](https://about.gitlab.com/images/blogimages/2023-09-27-microcks-and-gitlab-part-1-speed-up-api-and-microservices-delivery/keycloak-identity-provider.jpg){: .shadow.medium.center}\n\n\nThen, from the **Authentication** section in the admin console, choose the browser flow and configure the `Identity Provider Redirector` as follows:\n\n- `Alias`: `GitLab`\n- `Default Identity Provider`: `gitlab`\n\n![keycloak-redirector](https://about.gitlab.com/images/blogimages/2023-09-27-microcks-and-gitlab-part-1-speed-up-api-and-microservices-delivery/keycloak-redirector.jpg){: .shadow.medium.center}\n\n### Test your Microcks configuration\nNow open the Microcks URL in your browser and you'll be directly redirected to the GitLab login page. Enter your GitLab credentials and you will be authenticated and redirected to Microcks. 
\n\n![microcks-homepage](https://about.gitlab.com/images/blogimages/2023-09-27-microcks-and-gitlab-part-1-speed-up-api-and-microservices-delivery/microcks-homepage.jpeg){: .shadow.medium.center}\n\n## What's next?\nIn upcoming blogs, we'll detail how GitLab can be used in the two major use cases for Microcks. We'll see how Microcks integrates with GitLab Git repositories to discover API specifications and produce simulations, and how to integrate Microcks conformance tests into your GitLab CI/CD pipelines.\n\n_[Laurent Broudoux](https://www.linkedin.com/in/laurentbroudoux/) is a cloud-native architecture expert and enterprise integration problem lover. He has helped organizations in adopting distributed and cloud paradigms while capitalizing on their critical existing assets. He is the founder and lead developer of the [Microcks.io](https://microcks.io/) open-source project: a Kubernetes-native tool for API mocking and testing. For this, he is using his 10+ years experience as an architect in financial services where he defined API transformation strategies, including governance and delivery process._\n\n_[Madou Coulibaly](https://gitlab.com/madou) is a senior solutions architect at GitLab._\n",[9,874,109,1001,231],{"slug":1139,"featured":6,"template":690},"microcks-and-gitlab-part-one","content:en-us:blog:microcks-and-gitlab-part-one.yml","Microcks And Gitlab Part One","en-us/blog/microcks-and-gitlab-part-one.yml","en-us/blog/microcks-and-gitlab-part-one",{"_path":1145,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1146,"content":1152,"config":1158,"_id":1160,"_type":14,"title":1161,"_source":16,"_file":1162,"_stem":1163,"_extension":19},"/en-us/blog/secure-composition-analysis-bug-not-updating-database",{"title":1147,"description":1148,"ogTitle":1147,"ogDescription":1148,"noIndex":6,"ogImage":1149,"ogUrl":1150,"ogSiteName":673,"ogType":674,"canonicalUrls":1150,"schema":1151},"Bug found and resolved in Dependency Scanning","Some customers will need to take specific action to manually update their Dependency Scanning image to receive a bug fix.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663397/Blog/Hero%20Images/logoforblogpost.jpg","https://about.gitlab.com/blog/secure-composition-analysis-bug-not-updating-database","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Bug found and resolved in Dependency Scanning\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Nicole Schwartz\"}],\n        \"datePublished\": \"2021-02-19\",\n      }",{"title":1147,"description":1148,"authors":1153,"heroImage":1149,"date":1155,"body":1156,"category":959,"tags":1157},[1154],"Nicole Schwartz","2021-02-19","\n\n{::options parse_block_html=\"true\" /}\n\n\n\nDependency Scanning relies on the GitLab [Vulnerability Database](https://about.gitlab.com/direction/secure/vulnerability-research/advisory-database/) (called [gemnasium-db](https://gitlab.com/gitlab-org/security-products/gemnasium-db)) to provide it with the latest advisory data (i.e. CVEs). Dependency Scanning docker images are built and released with the latest version of the database and in addition, the analyzers update this database to the latest version at the time of a scan. \n\nHowever, starting with version 2.8.1 of the Dependency Scanning analyzer called gemnasium, the vulnerability database was [not updating itself at scan time](https://gitlab.com/gitlab-org/gitlab/-/issues/294296). 
Versions between v2.8.1 (released 2020-03-30) and v2.28.0 (released 2021-02-03) are affected by this bug. As a result, since the introduction of the bug, scan results would only be able to identify advisories published on or before the analyzer image release date. In some cases, this meant that the advisory data available to the Dependency Scanning analyzers was outdated by several weeks (relying only on the database checked out at image build time).\n\nWe are concerned that this bug made it out to customers and are performing a [root cause analysis](https://gitlab.com/gitlab-org/gitlab/-/issues/321315).\n\nMost customers will receive the bug fix automatically and will have the latest advisory database the next time their Dependency Scanning jobs run. But customers with their own copy of the GitLab container registry, or dedicated runners with a docker pull policy other than `always`, must manually pull the latest image or update their pin to it (or at least to one that is not impacted by this bug). Users that must take this manual action are:\n\n- Customers with an edited Dependency Scanning template that pins their analyzers to a non-major-only tag (for example, `gemnasium:2.27.0` rather than `gemnasium:2`)\n- Customers running in an [Offline Environment](https://docs.gitlab.com/ee/user/application_security/offline_deployments/) with their own container registry mirroring GitLab's\n- Self-managed customers or customers with their own docker runners using a pull policy other than `always`\n\nThe three analyzers that are affected are gemnasium, gemnasium-python, and gemnasium-maven. The affected versions of each are:\n\n- gemnasium v2.8.1 to v2.28.0: update to v2.28.1 or above\n- gemnasium-python v2.11.0 to v2.17.2: update to v2.17.3 or above\n- gemnasium-maven v2.13.0 to v2.20.3: update to v2.20.4 or above\n\nTL;DR - If you are using Dependency Scanning analyzers and are not always pulling their docker images from GitLab's docker container registry, please update your analyzers' docker images promptly in order to sync the analyzers with the latest available advisories.\n\n{: .note}\n",[686,916,9],{"slug":1159,"featured":6,"template":690},"secure-composition-analysis-bug-not-updating-database","content:en-us:blog:secure-composition-analysis-bug-not-updating-database.yml","Secure Composition Analysis Bug Not Updating Database","en-us/blog/secure-composition-analysis-bug-not-updating-database.yml","en-us/blog/secure-composition-analysis-bug-not-updating-database",{"_path":1165,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1166,"content":1171,"config":1176,"_id":1178,"_type":14,"title":1179,"_source":16,"_file":1180,"_stem":1181,"_extension":19},"/en-us/blog/speed-up-code-reviews-let-ai-handle-the-feedback-implementation",{"title":1167,"description":1168,"ogTitle":1167,"ogDescription":1168,"noIndex":6,"ogImage":829,"ogUrl":1169,"ogSiteName":673,"ogType":674,"canonicalUrls":1169,"schema":1170},"Speed up code reviews: Let AI handle the feedback implementation","Discover how GitLab Duo with Amazon Q automates the implementation of code review feedback through AI, transforming a time-consuming manual process into a streamlined workflow.","https://about.gitlab.com/blog/speed-up-code-reviews-let-ai-handle-the-feedback-implementation","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Speed up code reviews: Let AI handle the feedback implementation\",\n        \"author\": 
[{\"@type\":\"Person\",\"name\":\"Cesar Saavedra\"}],\n        \"datePublished\": \"2025-06-10\",\n      }",{"title":1167,"description":1168,"authors":1172,"heroImage":829,"date":1173,"body":1174,"category":681,"tags":1175},[678],"2025-06-10","You know that feeling when you've just submitted a merge request and the code review comments start rolling in? One reviewer wants the labels updated, another asks for side-by-side layouts, someone else requests bold formatting, and don't forget about that button color change. Before you know it, you're spending hours implementing feedback that, while important, takes you away from building new features. It's a time-consuming process that every developer faces, yet it feels like there should be a better way.\n\nWhat if you could have an AI assistant that understands code review feedback and automatically implements the changes for you? That's exactly what [GitLab Duo with Amazon Q](https://about.gitlab.com/blog/gitlab-duo-with-amazon-q-agentic-ai-optimized-for-aws/) brings to your development workflow. This seamless integration combines GitLab's comprehensive DevSecOps platform with Amazon Q's advanced AI capabilities, creating an intelligent assistant that can read reviewer comments and converts them directly into code changes. Instead of manually addressing each piece of feedback, you can let AI handle the implementation while you focus on the bigger picture.\n\n## How GitLab Duo with Amazon Q works\n\nWhen you're viewing a merge request with reviewer comments, you'll see feedback scattered throughout your code. Let's take the examples from earlier in this article: maybe you've received a request to update a form label here, a suggestion to display fields side-by-side there, or a note about making certain text bold. Each comment represents a task that normally you'd need to handle manually.\n\n![feedback on an MR](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749673634/Blog/Content%20Images/1-show-comment.png)\n\nWith GitLab Duo with Amazon Q, you can simply enter the `/q dev` quick action in a comment. This prompts Amazon Q to analyze all the feedback and start modifying your code automatically. The AI agent understands the context of each comment and implements the requested changes directly in your codebase.\n\n![/q dev function prompting Amazon Q to analyze feedback](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749673634/Blog/Content%20Images/2-invoke-q-dev.png)\n\nOnce Amazon Q processes the feedback, you can view all the updates in the \"Changes\" tab of your merge request. Every modification is clearly visible, so you can verify that the AI agent correctly interpreted and implemented each piece of feedback. You can then run your updated application to confirm that all the changes work as expected — that form label is updated, the fields are displayed side-by-side, the text is bold, and yes, that button is now blue.\n\nWatch the code review feedback process in action:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/31E9X9BrK5s?si=ThFywR34V3Bfj1Z-\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nProcessing code review feedback is a necessary but time-intensive part of software development.  GitLab Duo with Amazon Q evolves this manual process into an automated workflow, dramatically reducing the time between receiving feedback and implementing changes. 
By letting AI handle these routine modifications, you're free to focus on what really matters — building innovative features and solving complex problems.\n\nWith GitLab Duo with Amazon Q, you can:\n- Eliminate hours of manual feedback implementation\n- Accelerate your code review cycles\n- Maintain consistency in how feedback is addressed\n- Reduce context switching between reviewing comments and writing code\n- Ship features faster with streamlined deployment times\n\n> #### To learn more about GitLab Duo with Amazon Q visit us at an upcoming [AWS Summit in a city near you](https://about.gitlab.com/events/aws-summits/) or [reach out to your GitLab representative](https://about.gitlab.com/partners/technology-partners/aws/#form).\n\n## GitLab Duo with Amazon Q resources\n\n- [GitLab Duo with Amazon Q: Agentic AI optimized for AWS generally available](https://about.gitlab.com/blog/gitlab-duo-with-amazon-q-agentic-ai-optimized-for-aws/)\n- [GitLab and AWS partner page](https://about.gitlab.com/partners/technology-partners/aws/)\n- [GitLab Duo with Amazon Q documentation](https://docs.gitlab.com/user/duo_amazon_q/)\n- [What is agentic AI?](https://about.gitlab.com/topics/agentic-ai/)\n- [Agentic AI guides and resources](https://about.gitlab.com/blog/agentic-ai-guides-and-resources/)",[685,686,482,9,684,687],{"slug":1177,"featured":91,"template":690},"speed-up-code-reviews-let-ai-handle-the-feedback-implementation","content:en-us:blog:speed-up-code-reviews-let-ai-handle-the-feedback-implementation.yml","Speed Up Code Reviews Let Ai Handle The Feedback Implementation","en-us/blog/speed-up-code-reviews-let-ai-handle-the-feedback-implementation.yml","en-us/blog/speed-up-code-reviews-let-ai-handle-the-feedback-implementation",{"_path":1183,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1184,"content":1190,"config":1199,"_id":1201,"_type":14,"title":1202,"_source":16,"_file":1203,"_stem":1204,"_extension":19},"/en-us/blog/tracking-down-missing-tcp-keepalives",{"title":1185,"description":1186,"ogTitle":1185,"ogDescription":1186,"noIndex":6,"ogImage":1187,"ogUrl":1188,"ogSiteName":673,"ogType":674,"canonicalUrls":1188,"schema":1189},"Tracking TCP Keepalives: Lessons in Docker, Golang & GitLab","An in-depth recap of debugging a bug in the Docker client library.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680874/Blog/Hero%20Images/network.jpg","https://about.gitlab.com/blog/tracking-down-missing-tcp-keepalives","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"What tracking down missing TCP Keepalives taught me about Docker, Golang, and GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Stan Hu\"}],\n        \"datePublished\": \"2019-11-15\",\n      }",{"title":1191,"description":1186,"authors":1192,"heroImage":1187,"date":1194,"body":1195,"category":708,"tags":1196},"What tracking down missing TCP Keepalives taught me about Docker, Golang, and GitLab",[1193],"Stan Hu","2019-11-15","\n\nThis blog post was originally published on the GitLab Unfiltered blog. It was reviewed and republished on 2019-12-03.\n{: .alert .alert-info .note}\n\nWhat began as failure in a GitLab static analysis check led to a\ndizzying investigation that uncovered a subtle [bug in the Docker client\nlibrary code](https://github.com/docker/for-linux/issues/853) used by\nthe GitLab Runner. 
We ultimately worked around the problem by upgrading\nthe Go compiler, but in the process we uncovered an unexpected change in\nthe Go TCP keepalive defaults that fixed an issue with Docker and GitLab\nCI.\n\nThis investigation started on October 23, when backend engineer [Luke\nDuncalfe](/company/team/#.luke) mentioned, \"I'm seeing\n[`static-analysis` failures with no output](https://gitlab.com/gitlab-org/gitlab/-/jobs/331174397).\nIs there something wrong with this job?\" He opened [a GitLab\nissue](https://gitlab.com/gitlab-org/gitlab/issues/34951) to discuss.\n\nWhen Luke ran the static analysis check locally on his laptop, he saw\nuseful debugging output when the test failed. For example, an extraneous\nnewline would accurately be reported by Rubocop. However, when the same\ntest ran in GitLab's automated test infrastructure, the test failed\nquietly:\n\n![Failed job](https://about.gitlab.com/images/blogimages/docker-tcp-keepalive-debug/job-failure.png){: .shadow.center}\n\nNotice how the job log did not include any clues after the `bin/rake\nlint:all` step. This made it difficult to determine whether a real\nproblem existed, or whether this was just a flaky test.\n\nIn the ensuing days, numerous team members reported the same problem.\nNothing kills productivity like silent test failures.\n\n## Was something wrong with the test itself?\n\nIn the past, we had seen that if that specific test generated enough\nerrors, [the output buffer would fill up, and the continuous integration\n(CI) job would lock\nindefinitely](https://gitlab.com/gitlab-org/gitlab-foss/issues/61432). We\nthought we had [fixed that issue months\nago](https://gitlab.com/gitlab-org/gitlab-foss/merge_requests/28402). Upon\nfurther review, that fix seemed to eliminate any chance of a thread\ndeadlock.\n\nDid we have to flush the buffer? No, because the Linux kernel will do\nthat for an exiting process already.\n\n## Was there a change in how CI logs were handled?\n\nWhen a test runs in GitLab CI, the [GitLab\nRunner](https://gitlab.com/gitlab-org/gitlab-runner/) launches a Docker\ncontainer that runs commands specified by a `.gitlab-ci.yml` inside the\nproject repository. As the job runs, the runner streams the output to\nthe GitLab API via PATCH requests. The GitLab backend saves this data\ninto a file. The following sequence diagram shows how this works:\n\n```plantuml\n== Get a job! ==\nRunner -> GitLab: POST /api/v4/jobs/request\nGitLab -> Runner: 201 Job was scheduled\n\n== Job sends logs (1 of 2) ==\nRunner -> GitLab: PATCH /api/v4/job/:id/trace\nGitLab -> File: Save to disk\nGitLab -> Runner: 202 Accepted\n\n== Job sends logs (2 of 2) ==\nRunner -> GitLab: PATCH /api/v4/job/:id/trace\nGitLab -> File: Save to disk\nGitLab -> Runner: 202 Accepted\n```\n\n[Henrich Lee Yu](/company/team/#engwan) mentioned\nthat we had recently [disabled a feature flag that changed how GitLab\nhandled CI job\nlogs](https://docs.gitlab.com/ee/administration/job_logs.html#new-incremental-logging-architecture). [The\ntiming seemed to line\nup](https://gitlab.com/gitlab-org/gitlab/issues/34951#note_236723888).\n\nThis feature, called live CI traces, eliminates the need for a shared\nPOSIX filesystem (e.g., NFS) when saving job logs to disk by:\n\n1. Streaming data into memory via Redis\n2. Persisting the data in the database (PostgreSQL)\n3. Archiving the final data into object storage\n\nWhen this flag is enabled, the flow of CI job logs looks something like\nthe following:\n\n```plantuml\n== Get a job! 
==\nRunner -> GitLab: POST /api/v4/jobs/request\nGitLab -> Runner: 201 Job was scheduled\n\n== Job sends logs ==\nRunner -> GitLab: PATCH /api/v4/job/:id/trace\nGitLab -> Redis: Save chunk\nGitLab -> Runner: 202 Accepted\n...\n== Copy 128 KB chunks from Redis to database ==\nGitLab -> Redis: GET gitlab:ci:trace:id:chunks:0\nGitLab -> PostgreSQL: INSERT INTO ci_build_trace_chunks\n...\n== Job finishes ==\n\nRunner -> GitLab: PUT /api/v4/job/:id\nGitLab -> Runner: 200 Job was updated\n\n== Archive trace to object storage ==\n```\n\nLooking at the flow diagram above, we see that this approach has more\nsteps. After receiving data from the runner, something could have gone\nwrong with handling a chunk of data. However, we still had many\nquestions:\n\n1. Did the runners send the right data in the first place?\n1. Did GitLab drop a chunk of data somewhere?\n1. Did this new feature actually have anything to do with the problem?\n1. Are they really making another Gremlins movie?\n\n## Reproducing the bug: Simplify the `.gitlab-ci.yml`\n\nTo help answer those questions, we simplified the `.gitlab-ci.yml` to\nrun only the `static-analysis` step. We inserted a known Rubocop error,\nreplacing a `eq` with `eql`. We first ran this test on a separate GitLab\ninstance with a private runner. No luck there – the job showed the right\noutput:\n\n```\nOffenses:\n\nee/spec/models/project_spec.rb:55:42: C: RSpec/BeEql: Prefer be over eql.\n        expect(described_class.count).to eql(2)\n                                         ^^^\n\n12669 files inspected, 1 offense detected\n```\n\nHowever, we repeated the test on our staging server and found that we\nreproduced the original problem. In addition, the live CI trace feature\nflag had been activated on staging. Since the problem occurred with and\nwithout the feature, we could eliminate that feature as a possible\ncause.\n\nPerhaps something with the GitLab server environment caused a\nproblem. For example, could the load balancers be rate-limiting the\nrunners? As an experiment, we pointed a private runner at the staging\nserver and re-ran the test. This time, it succeeded: the output was\nshown. That seemed to suggest that the problem had more to do with the\nrunner than with the server.\n\n## Docker Machine vs. Docker\n\nOne key difference between the two tests: One runner used a shared,\nautoscaled runner using a [Docker\nMachine](https://docs.docker.com/machine/overview/) executor, and the\nprivate runner used a [Docker\nexecutor](https://docs.gitlab.com/runner/executors/docker.html).\n\nWhat does Docker Machine do exactly? The following diagram may help\nillustrate:\n\n![Docker Machine](https://docs.docker.com/machine/img/machine.png){: .medium.center}\n\nThe top-left shows a local Docker instance. When you run Docker from the\ncommand-line interface (e.g., `docker attach my-container`), the program\njust makes [REST calls to the Docker Engine\nAPI](https://docs.docker.com/engine/api/v1.40/).\n\nThe rest of the diagram shows how Docker Machine fits into the\npicture. Docker Machine is an entirely separate program. The GitLab\nRunner shells out to `docker-machine` to create and destroy virtual\nmachines using cloud-specific (e.g. Amazon, Google, etc.) drivers. Once\na machine is running, the runner then uses the Docker Engine API to run,\nwatch, and stop containers.\n\nNote that this API is used securely over an HTTPS connection. 
This is an\nimportant difference between the Docker Machine executor and Docker\nexecutor: The former needs to communicate across the network, while the\nlatter can either use a local TCP socket or UNIX domain socket.\n\n## Google Cloud Platform timeouts\n\nWe've known for a while that Google Cloud [has a 10-minute idle\ntimeout](https://cloud.google.com/compute/docs/troubleshooting/general-tips),\nwhich has caused issues in the past:\n\n> Note that idle connections are tracked for a maximum of 10 minutes,\n> after which their traffic is subject to firewall rules, including the\n> implied deny ingress rule. If your instance initiates or accepts\n> long-lived connections with an external host, you should adjust TCP\n> keep-alive settings on your Compute Engine instances to less than 600\n> seconds to ensure that connections are refreshed before the timeout\n> occurs.\n\nWas the problem caused by this timeout? With the Docker Machine\nexecutor, we found that we could reproduce the problem with a simple\n`.gitlab-ci.yml`:\n\n```yaml\nimage: \"busybox:latest\"\n\ntest:\n  script:\n    - date\n    - sleep 601\n    - echo \"Hello world!\"\n    - date\n    - exit 1\n```\n\nThis would reproduce the failure, where we would never see the `Hello\nworld!` output. Changing the `sleep 601` to `sleep 599` would make the\nproblem go away. Hurrah! All we have to do is tweak the system TCP\nkeepalives, right? Google provided these sensible settings:\n\n```sh\nsudo /sbin/sysctl -w net.ipv4.tcp_keepalive_time=60 net.ipv4.tcp_keepalive_intvl=60 net.ipv4.tcp_keepalive_probes=5\n```\n\nHowever, enabling these kernel-level settings didn't solve the\nproblem. Were keepalives even being sent? Or was there some other issue?\nWe turned our attention to network traces.\n\n## Eavesdropping on Docker traffic\n\nIn order to understand what was happening, we needed to be able to\nmonitor the network communication between the runner and the Docker\ncontainer. But how exactly does the GitLab Runner stream data from a\nDocker container to the GitLab server?  The following diagram\nillustrates the flow:\n\n```plantuml\nRunner -> Docker: POST /containers/name/attach\nDocker -> Runner: \u003Ccontainer output>\nDocker -> Runner: \u003Ccontainer output>\nRunner -> GitLab: PATCH /api/v4/job/:id/trace\nGitLab -> File: Save to disk\nGitLab -> Runner: 202 Accepted\n```\n\nFirst, the runner makes a [POST request to attach to the container\noutput](https://docs.docker.com/engine/api/v1.40/#operation/ContainerAttach).\nAs soon as a process running in the container outputs some data, Docker\nwill transmit the data over this HTTPS stream. The runner then copies\nthis data to GitLab via the PATCH request.\n\nHowever, as mentioned earlier, traffic between a GitLab Runner and the\nremote Docker machine is encrypted over HTTPS on port 2376. Was there an\neasy way to disable HTTPS? Searching through the code of Docker Machine,\nwe found that it did not appear to be supported out of the box.\n\nSince we couldn't disable HTTPS, we had two ways to eavesdrop:\n\n1. Use a man-in-the-middle proxy (e.g. [mitmproxy](https://mitmproxy.org/))\n1. 
Record the traffic and decrypt the traffic later using the private keys\n\n## Ok, let's be the man-in-the-middle!\n\nThe first seemed more straightforward, since [we already had experience\ndoing this with the Docker\nclient](https://docs.gitlab.com/ee/administration/packages/container_registry.html#running-the-docker-daemon-with-a-proxy).\n\nHowever, after [defining the proxy variables for GitLab\nRunner](https://docs.gitlab.com/runner/configuration/proxy.html#adding-proxy-variables-to-the-runner-config),\nwe found we were only able to intercept the GitLab API calls with\n`mitmproxy`. The Docker API calls still went directly to the remote\nhost. Something wasn't obeying the proxy configuration, but we didn't\ninvestigate further. We tried the second approach.\n\n## Decrypting TLS data\n\nTo decrypt TLS data, we would need to obtain the encryption keys. Where\nwere these located for a newly-created system with `docker-machine`? It\nturns out `docker-machine` worked in the following way:\n\n1. Call the Google Cloud API to create a new machine\n1. Create a `/root/.docker/machine/machines/:machine_name` directory\n1. Generate a new SSH keypair\n1. Install the SSH key on the server\n1. Generate a new TLS certificate and key\n1. Install and configure Docker on the newly-created machine with TLS certificates\n\nAs long as the machine runs, the directory will contain the information\nneeded to decode this traffic. We ran `tcpdump` and saved the private keys.\n\nOur first attempt at decoding the traffic failed. Wireshark could not\ndecode the encrypted traffic, although general TCP traffic could still\nbe seen. Researching more, we found out why: If the encrypted traffic\nused a [Diffie-Hellman key\nexchange](https://en.wikipedia.org/wiki/Diffie%E2%80%93Hellman_key_exchange),\nhaving the private keys would not suffice! 
This is by design, a property\ncalled [perfect forward\nsecrecy](https://en.m.wikipedia.org/wiki/Forward_secrecy).\n\nTo get around that limitation, we modified the GitLab Runner to disable\ncipher suites that used the Diffie-Hellman key exchange:\n\n```diff\ndiff --git a/vendor/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go b/vendor/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go\nindex 6b4c6a7c0..a3f86d756 100644\n",[267,1197,537,732,1198,732,9,874,686],"git","google",{"slug":1200,"featured":6,"template":690},"tracking-down-missing-tcp-keepalives","content:en-us:blog:tracking-down-missing-tcp-keepalives.yml","Tracking Down Missing Tcp Keepalives","en-us/blog/tracking-down-missing-tcp-keepalives.yml","en-us/blog/tracking-down-missing-tcp-keepalives",{"_path":1206,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1207,"content":1212,"config":1218,"_id":1220,"_type":14,"title":1221,"_source":16,"_file":1222,"_stem":1223,"_extension":19},"/en-us/blog/tuto-mac-m1-gitlab-ci",{"title":1208,"description":1209,"ogTitle":1208,"ogDescription":1209,"noIndex":6,"ogImage":1149,"ogUrl":1210,"ogSiteName":673,"ogType":674,"canonicalUrls":1210,"schema":1211},"How to use Scaleway to self-host your GitLab Runners","Learn how to set up GitLab CI for your iOS and macOS projects using a hosted Mac mini M1.","https://about.gitlab.com/blog/tuto-mac-m1-gitlab-ci","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to use Scaleway to self-host your GitLab Runners\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Benedikt Rollik\"}],\n        \"datePublished\": \"2021-06-07\",\n      }",{"title":1208,"description":1209,"authors":1213,"heroImage":1149,"date":1215,"body":1216,"category":708,"tags":1217},[1214],"Benedikt Rollik","2021-06-07","\nGitLab's complete DevOps platform comes with built-in continuous integration (CI) and continuous delivery (CD) via [GitLab CI/CD](https://docs.gitlab.com/ee/ci/). GitLab CI/CD is a great solution to increase developer productivity and motivation to write higher-quality code without sacrificing speed. It runs a series of tests every time a commit is pushed, providing immediate visibility into the results of changes in the codebase. While it is not a hassle to set up a CI using Linux-based machines, iOS and macOS developers may find it is more complicated to have access to a Mac that is connected and available 24 hours a day.\n\nGitLab Runners, provided on GitLab.com, are the engine that executes CI workflows. Due to various requirements, some users may opt to self-host runners on public cloud VMs. This is super easy if the build VM OS requirement is Linux-based since there are several low-cost public cloud Linux-based VM solutions. However, iOS and macOS developers may find fewer options for public cloud-delivered macOS based systems.\n\nIn this blog post tutorial, you will learn how to set up CI for iOS and macOS application development using a Scaleway Virtual Instance running the [GitLab application](https://www.scaleway.com/en/docs/install-gitlab-with-dbaas/) and a GitLab Runner that runs on a Scaleway-hosted [Mac mini M1](https://www.scaleway.com/en/hello-m1/). 
To complete this tutorial most successfully, we assume that you have some experience creating Xcode and GitLab projects, as well as some experiences using a Terminal and git.\n\n> **Requirements**\n>\n- You have an account and are logged into [console.scaleway.com](https://console.scaleway.com)\n- You have [configured your SSH Key](https://www.scaleway.com/en/docs/configure-new-ssh-key/)\n- You have a Virtual Instance running the GitLab InstantApp\n- **Note:** We assume you have already deployed a Virtual Instance running the GitLab InstantApp. If not, [deploy GitLab](https://www.scaleway.com/en/docs/install-gitlab-with-dbaas/) before continuing with this tutorial.\n\n### Deploying the Mac mini M1\n\n1. Log into your [Scaleway console](https://console.scaleway.com) and click on **Apple silicon** in the **Compute** section of the sidebar.\n\n   ![Orga_dashboard](https://about.gitlab.com/images/blogimages/scaleway-blog/Orga_dashboard.png){: .shadow.medium}\n   Click on the \"Apple silicon\" in the Scaleway console.\n   {: .note.text-center}\n\n1. The Apple silicon M1 as-a-Service splash screen displays. Click **Create a Mac mini M1**.\n1. Enter the details for your Mac mini M1:\n\n   - Select the geographic region in which your Mac mini M1 will be deployed.\n   - Choose the macOS version you want to run on the Mac mini M1.\n   - Select the hardware configuration for your Mac mini M1.\n   - Enter a name for your Mac mini M1.\n\n1. Click **Create a Mac mini M1** to launch the installation of your Apple silicon M1 as-a-Service.\n\n   ![M1_creation](https://about.gitlab.com/images/blogimages/scaleway-blog/M1_creation.png){: .shadow.medium}\n   Click \"Create a Mac mini M1\" to launch.\n   {: .note.text-center}\n\n1. Once deployed click **VNC** from the Mac mini M1 Overview page to launch the remote desktop client.\n\n1. Launch the **App Store** and install the **Xcode development environment** on your Mac mini M1.\n\n### Setting-up the Homebrew package manager\n\n[Homebrew](https://brew.sh/) is a package manager for macOS. It can be used to manage the software installed on your Mac. We use it to install `gitlab-runner` on your Mac mini M1.\n\n1. Click on the Terminal icon to open a new **Terminal**.\n\n1. Copy-paste the following code in the terminal application and press **Enter** to install Homebrew and the Xcode command line tools:\n\n   ```sh\n   /bin/bash -c \"$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)\"\n   ```\n\nLeave the terminal window open since it is required for the next step.\n\n#### Installing the GitLab Runner\n\nThe GitLab Runner is an application installed on a different computer than your GitLab host and runs jobs in a pipeline. It executes the build task on your Mac mini M1 for the code you push to your GitLab instance.\n\n1. Make sure you are still in the **Terminal** application. If you closed it after installing Homebrew, open a new one.\n\n1. Run the following command to install `gitlab-runner`:\n\n   ```\n   brew install gitlab-runner\n   ```\n\n### Configuring the Runner in GitLab\n\n   > **Note:** You require a Virtual Instance running the [GitLab InstantApp](https://www.scaleway.com/en/docs/how-to-use-the-gitlab-instant-apps/) for the following steps.\n\n1. GitLab Runner requires a registration token for the link between your GitLab Instance and the Runner. Open the GitLab web interface of your Virtual Instance and log into it.\n\n1. Select the project you want to use in GitLab with the Runner. 
If you don't have a project yet, click **+** > **Create Project** and fill in the required information about the project.\n\n1. On the projects overview page, click **Settings** > **CI/CD** to view the Continuous Integration settings.\n\n1. On the Continuous Integration settings page, click **Expand** in the **Runner** section to view the required information to link GitLab with your Runner.\n\n1. Scroll down to retrieve the GitLab Instance URL and the registration token.\n\n1. Run the following command in the Terminal application on your Mac to launch the configuration wizard for your GitLab Runner:\n\n   ```\n   gitlab-runner register\n   ```\n\n   Enter the required information as follows:\n\n   ```\n   Runtime platform                                    arch=arm64 os=darwin pid=810 revision=2ebc4dc4 version=13.9.0\n   WARNING: Running in user-mode.\n   WARNING: Use sudo for system-mode:\n   WARNING: $ sudo gitlab-runner...\n\n   Enter the GitLab instance URL (for example, https://gitlab.com/):\n   http://163.172.141.212/   \u003C- Enter the URL of your GitLab instance\n   Enter the registration token:\n   1mWBwzWAZSL7-pR18K3Y  \u003C- Enter the registration token for your Runner\n   Enter a description for the runner:\n   [306a20a2-2e01-4f2e-bc76-a004d35d9962]: Mac mini M1  \u003C- Enter a description for your Runner\n   Enter tags for the runner (comma-separated):\n   Mac, mini, M1, dev, xcode  \u003C- Optionally, enter tags for the runner\n   Registering runner... succeeded                     runner=1mWBwzWA\n   Enter an executor: shell, virtualbox, docker+machine, custom, docker, docker-ssh, kubernetes, parallels, ssh, docker-ssh+machine:\n   shell  \u003C- Enter the \"shell\" executor for the runner\n   Runner registered successfully. Feel free to start it, but if it's running already the config should be automatically reloaded!\n   ```\n\n1. Reload the CI/CD configuration page of your GitLab instance. The runner is now linked to your project and displays as available.\n\n   > **Note:** If you have several projects in a GitLab group, you can configure the Runner at the group-level. Runners available at the group-level are available for all projects within said group.\n\n### Configuring CI for your project\n\nGitLab stores the configuration of the CI in a file called `.gitlab-ci.yml`. This file should be in the folder you created for your project. Typically this is the same directory where your Xcode project file (`ProjectName.xcodeproj`) is located. The GitLab CI configuration file is written in [YAML](https://yaml.org/).\n\nInside the configuration file you can specify information like:\n\n* The scripts you want to run.\n* Other configuration files and templates you want to include.\n* Dependencies and caches.\n* The commands you want to run in sequence and those you want to run in parallel.\n* The location to deploy your application to.\n* Whether you want to run the scripts automatically or trigger any of them manually.\n\n1. Open a text editor on your local computer and create the `.gitlab-ci.yml` file as in the following example.\n\n   ```\n   stages:\n     - build\n     - test\n\n   build-code-job:\n     stage: build\n     script:\n       - echo \"Check the ruby version, then build some Ruby project files:\"\n       - ruby -v\n       - rake\n\n   test-code-job1:\n     stage: test\n     script:\n       - echo \"If the files are built successfully, test some files with one command:\"\n       - rake test1\n   ```\n\n1. 
Save the file and make a new commit to add it to your repository.\n\n1. Push the commit to GitLab. The CI will automatically launch the tasks on your Runner.\n\nFor more information on the GitLab CI configuration file, refer to the [official documentation](https://docs.gitlab.com/ee/ci/yaml/gitlab_ci_yaml.html).\n\n### Speed up development with Scaleway and GitLab\n\nHaving a dedicated Mac available for executing your CI jobs can reduce your development team's cycle time. In this tutorial, we covered configuring a dedicated Mac mini M1 to host a GitLab Runner. If you want to learn more about the Mac mini M1 as-a-Service, refer to our [product documentation](https://www.scaleway.com/en/docs/apple-silicon-as-a-service-quickstart/).\nWe invite the GitLab community to start building on Scaleway today with a €10 voucher to use on dozens of products & services. Find out more [here.](https://www.scaleway.com/en/gitlab-m1/)\n\n\u003Chr>\n\n_Mac mini, macOS are trademarks of Apple Inc., registered in the U.S. and other countries and regions. IOS is a trademark or registered trademark of Cisco in the U.S. and other countries and is used by Apple under license. Scaleway is not affiliated with Apple Inc._\n",[1197,9,9],{"slug":1219,"featured":6,"template":690},"tuto-mac-m1-gitlab-ci","content:en-us:blog:tuto-mac-m1-gitlab-ci.yml","Tuto Mac M1 Gitlab Ci","en-us/blog/tuto-mac-m1-gitlab-ci.yml","en-us/blog/tuto-mac-m1-gitlab-ci",{"_path":1225,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1226,"content":1232,"config":1240,"_id":1242,"_type":14,"title":1243,"_source":16,"_file":1244,"_stem":1245,"_extension":19},"/en-us/blog/ultimate-guide-to-migrating-from-aws-codecommit-to-gitlab",{"title":1227,"description":1228,"ogTitle":1227,"ogDescription":1228,"noIndex":6,"ogImage":1229,"ogUrl":1230,"ogSiteName":673,"ogType":674,"canonicalUrls":1230,"schema":1231},"Ultimate guide to migrating from AWS CodeCommit to GitLab","Learn how to migrate from AWS Services to GitLab and seamlessly integrate with the DevSecOps platform in this comprehensive tutorial.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097810/Blog/Hero%20Images/Blog/Hero%20Images/blog-image-template-1800x945%20%2828%29_4mi0l4wzUa5VI4wtf8gInx_1750097810027.png","https://about.gitlab.com/blog/ultimate-guide-to-migrating-from-aws-codecommit-to-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Ultimate guide to migrating from AWS CodeCommit to GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Tsukasa Komatsubara\"},{\"@type\":\"Person\",\"name\":\"Darwin Sanoy\"},{\"@type\":\"Person\",\"name\":\"Samer Akkoub\"},{\"@type\":\"Person\",\"name\":\"Bart Zhang\"}],\n        \"datePublished\": \"2024-08-26\",\n      }",{"title":1227,"description":1228,"authors":1233,"heroImage":1229,"date":1237,"body":1238,"category":685,"tags":1239},[1234,705,1235,1236],"Tsukasa Komatsubara","Samer Akkoub","Bart Zhang","2024-08-26","On July 25, 2024, AWS made a significant announcement regarding its CodeCommit service. As detailed in their [official blog post](https://aws.amazon.com/blogs/devops/how-to-migrate-your-aws-codecommit-repository-to-another-git-provider/), AWS has decided to close new customer access to CodeCommit. 
While existing customers can continue using the service, AWS will not introduce new features, focusing only on security, availability, and performance improvements.\n\nThis announcement has prompted development teams to consider migrating their repositories to alternative Git providers. In light of these changes, we've prepared this comprehensive guide to assist teams in migrating to GitLab and integrating with other AWS services.\n\n**Note:** For more details on AWS's official migration recommendations, please refer to [their blog post](https://aws.amazon.com/blogs/devops/how-to-migrate-your-aws-codecommit-repository-to-another-git-provider/).\n\n## About this guide\n\nThis guide provides comprehensive information for development teams using GitLab who are considering integration with AWS services or planning to migrate from AWS-hosted Git repositories to GitLab.com. The guide is structured into four main sections:\n\n- [Parallel migration to GitLab](#section-1-parallel-migration-to-gitlab): Explains how to gradually migrate from existing AWS-hosted repositories to GitLab.com while minimizing risks.\n\n- [Integration with AWS CodeBuild](#section-2-integrating-gitlab-with-aws-codebuild): Provides steps to integrate GitLab repositories with AWS CodeBuild, setting up a powerful continuous integration (CI) environment.\n\n- [Integration with AWS CodePipeline](#section-3-integrating-gitlab-with-aws-codepipeline): Details how to connect GitLab repositories with AWS CodePipeline to build efficient continuous delivery (CD) pipelines.\n\n- [Downstream integrations for CodePipeline and CodeStar Connections](#section-4-migrating-to-gitlab): Explains how to leverage GitLab-AWS connections for widespread service access, unlocking a cascade of integration possibilities across the AWS ecosystem.\n\nThrough this guide, you'll learn how to combine the powerful features of GitLab and AWS to create an efficient and flexible development workflow.\n\n## Section 1: Parallel migration to GitLab \n\nFor those considering migrating Git repositories hosted on AWS to GitLab.com, this section introduces a phased approach that achieves the migration while minimizing risk. By leveraging GitLab's mirroring capabilities, you can maintain existing development flows while testing the new environment.\n\n### Why is parallel migration important?\n\nLarge-scale system migrations always involve risks, particularly potential impacts on ongoing development work, existing integrations, and automated processes. Adopting a parallel migration approach offers the following benefits:\n\n1. Risk minimization: Test the new environment while keeping existing systems operational.\n2. Seamless transition: Development teams can gradually acclimate to the new system.\n3. Integration testing: Thoroughly test all integrations and automation in the new environment.\n4. 
Future-proofing: Enable teams to gradually migrate to GitLab CI/CD in parallel with existing CI.\n\nParallel migration is not required if it is already known that you want to cut over directly to GitLab.\n\n### Steps for migrating to GitLab.com\n\n#### Step 1: Get set up on GitLab.com\n\n- Check if your company already has a group in use on GitLab.com and whether they have single sign-on (SSO) set up – if they do, then you will want to use both.\n\n- If your company does not have a presence on GitLab.com, visit [GitLab.com](https://gitlab.com/) and create a new account or log in to an existing one.\n- Create a new company namespace (a group at the root level of gitlab.com).\n- Pick a name that reflects your entire company (and is not already taken).\n\n#### Step 2: Import repository\nFor parallel migration: Use GitLab's pull mirroring feature to automatically sync changes from AWS-hosted repositories to GitLab.com.\n\n1. Navigate to the target group on GitLab.com.\n2. In the upper right, click \"New project.\"\n3. On the \"Create new project\" page, click \"Import project.\"\n4. On the \"Import project\" page, click \"Repository by URL.\"\n5. Enter the URL of your AWS-hosted repository in the \"Git repository URL\" field.\n6. Underneath the Git repository URL field, check \"Mirror repository.\"\n7. Set up authentication: in the AWS CodeCommit console, select the clone URL for the repository you will migrate. If you plan on importing CodeCommit repositories into GitLab, you can use the HTTPS CodeCommit URL to clone the repository via GitLab Repository Mirroring. You will also need to provide your Git credentials from AWS for your identity and access management (IAM) user within GitLab. You can create Git credentials for AWS CodeCommit by following this [AWS guide](https://docs.aws.amazon.com/codecommit/latest/userguide/setting-up-gc.html).\n\n![Clone URL](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097822/Blog/Content%20Images/Blog/Content%20Images/clone-url-screenshot__1__aHR0cHM6_1750097822121.png)\n\nThis setup will automatically pull changes from the AWS-hosted repository to GitLab.com every five minutes by default.\n\nFor more information, read our [repository mirroring documentation](https://docs.gitlab.com/ee/user/project/repository/mirror/).\n\n#### Step 3: Test and validate integrations\n\n1. CI/CD pipelines: Set up the `.gitlab-ci.yml` file in GitLab CI to replicate existing pipelines. You can read more about [planning a migration from other CI tools into GitLab CI/CD](https://docs.gitlab.com/ee/ci/migration/plan_a_migration.html).\n2. Issue tracking: Import project issues and test workflows.\n3. Code review: Set up the merge request process and test review workflows.\n\n#### Step 4: Gradual migration\n\n1. Start with small or non-critical projects to familiarize yourself with working on GitLab.com.\n2. Provide training for team members and allow time to adapt to new workflows.\n3. Gradually migrate more projects while ensuring integrations and workflows are problem-free.\n\nFor more information, see [Automating Migrations from CodeCommit to GitLab](https://gitlab.com/guided-explorations/aws/migrating-from-codecommit-to-gitlab/-/blob/main/migrating_codecommit_to_gitlab.md).\n\n#### Step 5: Complete migration\nOnce all tests and validations are complete and the team is comfortable with the new environment, plan for full migration. For each project:\n\n1. Set a migration date and notify all stakeholders.\n2. Perform final data synchronization.\n3. 
Remove mirroring settings from the GitLab project.\n4. Set AWS-hosted repositories to read-only and transition all development work to GitLab.com.\n\n#### Step 6: Assess adoption of new capabilities\n\nGitLab collaboration and workflow automation for developers is far richer than CodeCommit. It merits some time to learn what these capabilities are. The merge request process is especially rich compared to CodeCommit.\n\nAfter repositories are stable on GitLab, it is very easy to experiment with GitLab CI/CD in parallel to an existing solution. Teams can take time to perfect their GitLab CI/CD automation while production workflows remain unaffected.\n\nGitLab artifact management is also very capable with the Releases feature and many package registries.\n\n### Section 1: Summary\nBy adopting a parallel migration approach to GitLab, you can achieve a smooth transition while minimizing risks. This process allows teams to gradually adapt to the new environment and ensure all integrations and automations function correctly. Cutover migrations only omit a single setting checkbox if it is known that a parallel migration is not necessary.\n\n## Section 2: Integrating GitLab with AWS CodeBuild\n\nFor those wanting to build and test code from GitLab repositories using AWS CodeBuild, this comprehensive guide will help you set up an efficient CI pipeline.\n\n### Prerequisites\n\n- GitLab.com account\n- AWS account\n- AWS CLI (configured)\n\n### Step 1: Create GitLab connection in AWS CodeStar Connections\n\n1. Log in to the AWS Management Console and navigate to the CodeBuild service.\n2. Select \"Settings\" > \"Connections\" from the left navigation panel.\n3. Click the \"Create connection\" button.\n4. Choose \"GitLab\" as the provider.\n5. Enter a connection name and click \"Connect to GitLab.\"\n6. You'll be redirected to the GitLab authentication page.\n7. Approve the necessary permissions.\n8. Once successful, the connection status will change to \"Available.\"\n\n![CodeStar Connect setup](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097822/Blog/Content%20Images/Blog/Content%20Images/codestar-connections-setup_aHR0cHM6_1750097822122.png)\n\n### Step 2: Create AWS CodeBuild project\n\n1. Click \"Create build project\" on the CodeBuild dashboard.\n2. Enter a project name and description.\n3. For source settings, select \"GitLab\" as the provider.\n4. Choose the connection you just created and specify the GitLab repository and branch.\n\n![Add CodeBuild project](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097822/Blog/Content%20Images/Blog/Content%20Images/codepipeline_step_3_add_codebuild_aHR0cHM6_1750097822123.png)\n\n**Note: From Step 3 forward, please configure the settings according to your specific environment and needs.**\n\n### Summary of Section 2\nThis section explained in detail how to integrate GitLab repositories with AWS CodeBuild. This setup enables a continuous integration pipeline where code changes in GitLab are automatically built and tested using AWS CodeBuild.\n\n## Section 3: Integrating GitLab with AWS CodePipeline\n\nFor those looking to implement continuous delivery from GitLab repositories using AWS CodePipeline, this detailed guide will be helpful. The integration has become even easier now that GitLab is available as an AWS CodeStar Connections provider.\n\n### Prerequisites\n\n- GitLab.com account\n- AWS account\n- AWS CLI (configured)\n\n### Step 1: Create GitLab connection in AWS CodeStar Connections\n\n1. 
Log in to the AWS Management Console and navigate to the CodePipeline service.\n2. Select \"Settings\" > \"Connections\" from the left navigation panel.\n3. Click the \"Create connection\" button.\n4. Choose \"GitLab\" as the provider.\n5. Enter a connection name and click \"Connect to GitLab.\"\n6. You'll be redirected to the GitLab authentication page.\n7. Approve the necessary permissions.\n8. Once successful, the connection status will change to \"Available.\"\n\n![CodeStar Connections setup](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097822/Blog/Content%20Images/Blog/Content%20Images/codestar-connections-setup_aHR0cHM6_1750097822125.png)\n\n### Step 2: Create AWS CodePipeline\n\n1. Click \"Create pipeline\" on the CodePipeline dashboard.\n2. Enter a pipeline name and click \"Next.\"\n3. Select \"GitLab\" as the source provider.\n4. Choose the connection you just created and specify the GitLab repository and branch.\n5. Select the Trigger type: You can trigger CodePipeline pipeline execution based on either pull or push events against specific branches and file types within your repository.\n\n![Add source provider](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097822/Blog/Content%20Images/Blog/Content%20Images/codepipeline_step_2_source_provider_aHR0cHM6_1750097822127.png)\n\n![Add source configuration](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097822/Blog/Content%20Images/Blog/Content%20Images/codepipeline_step_2_source_configured_aHR0cHM6_1750097822129.png)\n\n**Note: From Step 3 forward, please configure the settings according to your specific environment and needs.**\n\n### Summary of Section 3\nThis section detailed how to integrate GitLab repositories with AWS CodePipeline. This setup enables a continuous delivery pipeline where code changes in GitLab are automatically deployed to your AWS environment.\n\n## Section 4: Migrating to GitLab\n\nIntegrating GitLab with AWS unlocks powerful capabilities for streamlining your development and deployment workflows and helps to solve your source code management woes. This integration can be achieved in several ways, each offering unique benefits:\n\n- Using AWS CodeStar Connections to link GitLab with AWS services enables a more cohesive workflow by allowing external Git repositories, like GitLab, to connect with various AWS services. This setup supports automated builds, deployments, and other essential actions directly from your GitLab repository, making your development process more integrated and streamlined.\n\n- Connecting GitLab with AWS CodePipeline via AWS CodeStar Connections takes automation to the next level by allowing you to create a full CI/CD pipeline. This approach integrates GitLab with AWS CodePipeline, enabling you to automate the entire process – from source control and builds to testing and deployment – using AWS services like CodeBuild and CodeDeploy. This ensures a robust, scalable, and efficient delivery process.\n\n![Chart of new technology and solutions for using GitLab and AWS together](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097822/Blog/Content%20Images/Blog/Content%20Images/Announcing_New_Technology_and_Solutions_for_using_GitLab_and_AWS_Together_aHR0cHM6_1750097822130.png)\n\n1\\. Connecting GitLab with AWS services using AWS CodeStar Connections\n\nAWS CodeStar Connections is a service that allows you to connect external Git repositories (such as GitHub or Bitbucket) to AWS services. 
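\n\nAs a side note, teams that prefer scripting over the console walkthroughs shown earlier can also create such a connection from the AWS CLI. The following is only a sketch: the connection name is a placeholder, the provider type value should be verified against the current AWS CLI reference, and a connection created this way still starts out pending and must be authorized from the console before use.\n\n```sh\n# Create a CodeStar Connections connection that uses GitLab as the provider\naws codestar-connections create-connection --provider-type GitLab --connection-name my-gitlab-connection\n```\n\n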
You can also connect GitLab to AWS services via CodeStar Connections. When using GitLab, you may need to set up a custom connection as an HTTP Git server.\nThe following AWS services can be connected to GitLab using this method:\n\n- **AWS Service Catalog**\n\nAWS Service Catalog helps organizations standardize and manage AWS resources. Integrating it with GitLab improves transparency in resource management and simplifies change tracking. Specifically, you can automate catalog updates based on GitLab commits, enhancing operational efficiency.\n\n- __AWS CodeBuild__\n\nAWS CodeBuild is a managed build service that compiles source code, runs tests, and produces deployable software packages. Integrating GitLab with CodeBuild allows automated build processes to start whenever code changes are pushed to GitLab. This ensures consistency in builds and facilitates easier collaboration and version control.\n\n- __AWS Glue Notebook Jobs__\n\nAWS Glue Notebook Jobs is a service that allows you to interactively develop and run data preparation and ETL (Extract, Transform, Load) tasks. Integrating GitLab with Glue Notebook Jobs enables version control for notebooks and ETL scripts, promotes collaboration among team members, and improves the quality management of data processing pipelines.\n\n- __AWS Proton__\n\nAWS Proton is a service that automates the development and deployment of microservices and serverless applications. By integrating GitLab with AWS Proton, you can manage infrastructure as code, automate deployments, and ensure consistent environment management, leading to more efficient development processes.\n\nAs AWS CodeStar Connections supports more services, connecting GitLab with additional AWS services will become easier. It's advisable to regularly check for new services that support CodeStar Connections.\n\n2. Connecting CodePipeline with GitLab via AWS CodeStar Connections (including CodeDeploy)\n\nAWS CodePipeline is a continuous delivery service that automates the release process for software. To connect GitLab with CodePipeline, you need to use AWS CodeStar Connections. This setup allows you to designate a GitLab repository as the source and automate the entire CI/CD pipeline.\nThe primary actions supported by CodePipeline include:\n- **Source control:** AWS CodeCommit, GitHub, Bitbucket, GitLab\n- **Build and test:** AWS CodeBuild, Jenkins\n- **Deploy:** AWS CodeDeploy, Elastic Beanstalk, ECS, S3\n- **Approval:** Manual approval\n- **Infrastructure management:** AWS CloudFormation\n- **Serverless:** AWS Lambda\n- **Testing:** AWS Device Farm\n- **Custom Actions:** AWS Step Functions\n\nBy integrating GitLab with CodePipeline, you can automatically trigger the pipeline whenever code changes are pushed to GitLab, allowing a consistent process from build to deployment. Additionally, combining this with GitLab's version control capabilities makes it easier to track deployment history and states, leading to more flexible and reliable software delivery.\n\n## What you've learned\nThis guide has provided comprehensive information on migrating to and integrating GitLab with AWS. 
Through the four main topics, we've covered:\n- Parallel migration to GitLab: How to gradually migrate from existing AWS-hosted repositories to GitLab.com while minimizing risks.\n- Integration with AWS CodeBuild: Steps to set up a powerful CI environment integrated with GitLab repositories.\n- Integration with AWS CodePipeline: How to build efficient continuous delivery pipelines using GitLab repositories.\n- Downstream integrations for CodePipeline and CodeStar Connections: Leveraging GitLab-AWS connections for widespread service access, unlocking a cascade of integration possibilities across the AWS ecosystem.\n\nAs every organization's code hosting and integration implementation strategy is unique, this tutorial may be used as a starting point for your own GitLab + AWS integration and implementation strategy.\n\n## Additional resources\n\nFor more detailed information and advanced configurations, refer to the following resources:\n\n- [GitLab documentation](https://docs.gitlab.com/)\n- [AWS CodeBuild User Guide](https://docs.aws.amazon.com/codebuild/latest/userguide/welcome.html)\n- [AWS CodePipeline User Guide](https://docs.aws.amazon.com/codepipeline/latest/userguide/welcome.html)\n- [GitLab CI/CD documentation](https://docs.gitlab.com/ee/ci/)\n- [Integrate with AWS](https://docs.gitlab.com/ee/solutions/cloud/aws/gitlab_aws_integration.html)\n\nIf you have questions or need support, please contact [GitLab Support](https://about.gitlab.com/support/) or AWS Support. We hope this comprehensive guide helps you in your AWS-GitLab integration journey.",[109,9,482,687,754,685,231],{"slug":1241,"featured":91,"template":690},"ultimate-guide-to-migrating-from-aws-codecommit-to-gitlab","content:en-us:blog:ultimate-guide-to-migrating-from-aws-codecommit-to-gitlab.yml","Ultimate Guide To Migrating From Aws Codecommit To Gitlab","en-us/blog/ultimate-guide-to-migrating-from-aws-codecommit-to-gitlab.yml","en-us/blog/ultimate-guide-to-migrating-from-aws-codecommit-to-gitlab",{"_path":1247,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1248,"content":1254,"config":1261,"_id":1263,"_type":14,"title":1264,"_source":16,"_file":1265,"_stem":1266,"_extension":19},"/en-us/blog/why-i-love-contributing-to-gitlab",{"title":1249,"description":1250,"ogTitle":1249,"ogDescription":1250,"noIndex":6,"ogImage":1251,"ogUrl":1252,"ogSiteName":673,"ogType":674,"canonicalUrls":1252,"schema":1253},"Why I love contributing to GitLab","Making small meaningful changes is what it's all about.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679501/Blog/Hero%20Images/new-feature.png","https://about.gitlab.com/blog/why-i-love-contributing-to-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Why I love contributing to GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Austin Regnery\"}],\n        \"datePublished\": \"2021-05-11\",\n      }",{"title":1249,"description":1250,"authors":1255,"heroImage":1251,"date":1257,"body":1258,"category":959,"tags":1259},[1256],"Austin Regnery","2021-05-11","\n\nIt was mid-morning on a Tuesday in February, and I had 10 minutes in between meetings. So I decided to try and solve a pain point of mine. \n\nYou see, I had to memorize this HTML snippet to create a collapsible section in GitLab Issue descriptions and comments, but I kept forgetting it. Was it `summary` or `section`? 
I could never remember.\n\n```html\n\u003Cdetails>\n\u003Csummary>Insert Title\u003C/summary>\nHidden content\n\u003C/details>\n```\n\nEven though it is not vanilla Markdown, GitLab knows how to interpret some HTML. I used this formatting trick fairly often since full-page screenshots can occupy a lot of screen space, which leads to excessive scrolling.\n\nSo I decided to poke around our codebase to see how the other Markdown shortcuts worked. To my surprise, it was pretty straightforward. Each shortcut had a simple text input that mapped to each button. This implementation was simple to replicate since I just needed to copy/paste and replace a few words.\n\n![Image of Vue and Haml files with editor shortcuts](https://about.gitlab.com/images/blogimages/why-i-love-contributing-to-gitlab/vue-haml.png){: .shadow}\n\nThe Vue and Haml files with the new shortcut\n{: .note.text-center}\n\nI started a branch and began hacking away at the code. Now, I would never call myself a Software Engineer, but I like to try and make things from time to time. I was able to add a new shortcut to the toolbar to insert this code snippet for me in less than 10 minutes. No more memorizing! Making contributions like this is what makes working at GitLab so special.\n\nNow, it wasn't ready for production, but I at least had something that worked. I shared it with my UX colleagues in Slack, and it started to gain traction with several up-votes and a few constructive comments on how to make it better.\n\nWith the functionality fleshed out, a few other designers helped me get a better icon added to our SVG library. Using clear iconography is critical for communicating information effectively.\n\n| Initial Icon | Final Icon |\n| - | - |\n| ![SVG of chevron right icon](https://about.gitlab.com/images/blogimages/why-i-love-contributing-to-gitlab/chevron-right.svg) | ![SVG of details block icon](https://about.gitlab.com/images/blogimages/why-i-love-contributing-to-gitlab/details-block.svg) |\n\nThe last thing to do was resolve my failing tests, and I had several teammates help me do that.\n\n![Gif of the shortcut being used](https://about.gitlab.com/images/blogimages/why-i-love-contributing-to-gitlab/demo.gif){: .shadow}\n\nToday [this change](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/54938) merged! Now I've solved a pain point for me and others. It took a few months to go from idea to production, but the effort was super low. 
I'd say the return on my initial investment, 10 minutes, is super high.\n\n> Having a direct impact on a product was never an option for me before joining GitLab.\n\n![Image of participants in the Merge Request](https://about.gitlab.com/images/blogimages/why-i-love-contributing-to-gitlab/participants.png){: .shadow}\n\nThank you to everyone that helped me deploy this\n{: .note.text-center}",[1260,685,9],"UX",{"slug":1262,"featured":6,"template":690},"why-i-love-contributing-to-gitlab","content:en-us:blog:why-i-love-contributing-to-gitlab.yml","Why I Love Contributing To Gitlab","en-us/blog/why-i-love-contributing-to-gitlab.yml","en-us/blog/why-i-love-contributing-to-gitlab",{"_path":1268,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1269,"content":1275,"config":1283,"_id":1285,"_type":14,"title":1286,"_source":16,"_file":1287,"_stem":1288,"_extension":19},"/en-us/blog/write-vulnerability-detection-rules",{"title":1270,"description":1271,"ogTitle":1270,"ogDescription":1271,"noIndex":6,"ogImage":1272,"ogUrl":1273,"ogSiteName":673,"ogType":674,"canonicalUrls":1273,"schema":1274},"How to write and continuously test vulnerability detection rules for SAST","Interns with the Google Summer of Code helped GitLab transition from our old SAST tools to Semgrep.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667819/Blog/Hero%20Images/anomaly-detection-cover.png","https://about.gitlab.com/blog/write-vulnerability-detection-rules","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to write and continuously test vulnerability detection rules for SAST\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Ross Fuhrman\"},{\"@type\":\"Person\",\"name\":\"Anshuman Singh\"},{\"@type\":\"Person\",\"name\":\"Julian Thome\"}],\n        \"datePublished\": \"2021-09-08\",\n      }",{"title":1270,"description":1271,"authors":1276,"heroImage":1272,"date":1280,"body":1281,"category":916,"tags":1282},[1277,1278,1279],"Ross Fuhrman","Anshuman Singh","Julian Thome","2021-09-08","\nIn summer 2021, the [Vulnerability Research](/handbook/engineering/development/sec/secure/vulnerability-research/) and [Static Analysis](/handbook/engineering/development/sec/secure/static-analysis/)\nteams launched the [Google Summer of Code (GSoC)](https://summerofcode.withgoogle.com/) project: [Write vulnerability detection rules for SAST](https://gitlab.com/gitlab-com/marketing/community-relations/contributor-program/gitlab-gsoc-2021/-/issues/3).\n\nFor this project, we built and implemented a framework to helps transition GitLab away from our current SAST tools over to Semgrep. Semgrep is a language-agnostic SAST tool that is gaining popularity in CI/CD environments.\nBefore replacing an analyzer with the corresponding Semgrep configuration (called rule-sets), we need to ensure that they are equivalent – in that they yield the same set of findings.\n\nFor this purpose, we built a testing framework that helps us assess the quality of a Semgrep rule-set. This framework has been used to guide the replacement of [flawfinder](https://gitlab.com/gitlab-org/security-products/analyzers/flawfinder), a C/C++ analyzer with a corresponding Semgrep rule-set. 
This new testing framework leverages the power of GitLab CI/CD.\n\n## Preliminaries\n\n### GitLab and the Google Summer of Code (GSoC)\n\nThe Google Summer of Code (GSoC) is a 10-week program that enlists student interns to work on an open source project in collaboration with open source organizations. For GSoC 2021, GitLab offered [4 GSoC projects to the GSoC interns](/blog/gsoc-at-gitlab/). The [interns completed each project](https://gitlab.com/gitlab-com/marketing/community-relations/contributor-program/gitlab-gsoc-2021/-/issues) under the guidance of a GitLab team member who served as their mentor and provided regular feedback and assistance when needed.\n\n**[Read reflections from the Google Summer of Code interns about [what it was like working with GitLab](/blog/gsoc-at-gitlab/)]**\n\n### About Semgrep\n\n[Semgrep](https://semgrep.dev/) is a language-agnostic static-analysis (SAST) tool that is powered by [tree-sitter](https://tree-sitter.github.io/tree-sitter/). Tree-sitter is a robust parser-generator tool that supports parsing a variety of languages.\n\nSemgrep supports a [rule-syntax](https://semgrep.dev/docs/writing-rules/rule-syntax/) which can be used to formulate detection rules in a configuration-as-code YAML format. A Semgrep rule determines the findings that Semgrep is supposed to detect. These rules are combined to create a rule-set.\n\n### About GitLab SAST\n\nGitLab is a complete DevSecOps platform and integrates a [variety of static analysis tools](https://docs.gitlab.com/ee/user/application_security/sast/analyzers.html) that help developers find vulnerabilities as early as possible in the software development lifecycle (SDLC).\n\nSince all the integrated SAST tools are very different in terms of implementation as well as the tech stack they depend on, the SAST tools are all wrapped in Docker images. The wrappers translate the native vulnerability reports to a [generic, common report format](https://docs.gitlab.com/ee/user/application_security/sast/) which is made available by means of the `gl-sast-report.json` artifact. This generic report is GitLab's common interface between analyzers and the GitLab Rails backend.\n\n## Write vulnerability detection rules\n\n### Some background on our SAST tools\n\nOver time, the growing number of integrated SAST tools has become a maintenance burden for GitLab due to two major contributing factors.\n\n1. **Integration cost**: All SAST tools have different release cycles – new releases have to be pulled in immediately so that our users can benefit from them. Given the large number of integrated SAST tools, the time spent monitoring the SAST tools for new releases, integrating them, and testing them is expensive in terms of engineering effort/time.\n\n1. **Inflexibility**: Adapting or modifying SAST tool behavior is non-trivial because each tool is based on different technologies. Also, upstream contributions to the original analyzer repositories are not guaranteed to be included by the maintainers. In these cases, we have to fork the project, which is not a scalable solution with regard to maintenance effort.\n\nGitLab is in the process of replacing various SAST tools with a single, language-agnostic SAST tool, called Semgrep, to fix these problems. Semgrep can be configured by means of rules that are used to define what Semgrep is supposed to find. 
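As a rough illustration of this rule syntax, here is a minimal, hypothetical Semgrep rule (the rule ID, pattern, and message are invented for illustration and are not taken from the actual rule repository):\n\n```yaml\nrules:\n  - id: exec-used                  # hypothetical rule ID\n    patterns:\n      - pattern: exec(...)         # flag any call to exec()\n      - pattern-not: exec(\"...\")   # but ignore calls that pass a constant string literal\n    message: Use of exec() detected; review for code-injection risk.\n    languages: [python]\n    severity: WARNING\n```\n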
These rules are provided as YAML configuration files, so it is fairly easy to adapt the behavior of Semgrep to different use cases.\nSemgrep's configuration-as-code approach paired with its language support enables us to replace multiple analyzers, which effectively reduces the maintenance burden.\n\nHowever, the SAST tool replacement itself is a challenging process. For the majority of use cases, we have to assume that there is already a large amount of historic vulnerability data recorded and acted upon using [GitLab's vulnerability management features](/handbook/security/threat-management/vulnerability-management/). Users may also have grown accustomed to working with certain analyzers and may even have a certain level of expectation with regard to the findings produced by the analyzer.\n\nA smooth transition from a language-specific analyzer to a corresponding Semgrep rule-set must be guaranteed by meeting a certain level of quality assurance. A rule-set should produce results that are at least as good as those of the original analyzer – a property we refer to as parity. In turn, verifying parity required us to build test-suites that measure the gap (in terms of rule coverage) between the original analyzer and the rule-set that is to replace it. A good quality rule-set is expected to perform at least as well as the SAST tool it aims to replace (zero gap, full parity).\n\nThere are cases where the original SAST tool may falsely report vulnerabilities. In these situations, we aim to improve our rule-set in a controlled manner by explicitly documenting our improvements. However, before improving a rule-set, we want to start from a position of complete parity so that we have a holistic view of the impact incurred by single rule improvements. This documentation of applied improvements is important so we can justify changes in reported findings to the customer.\n\nThere are three challenges we tried to address with this project:\n\n1. **Rule management**: Provide a central rule repository to store, distribute, and track changes applied to rules as well as test-cases.\n1. **Rule testing**: Every change applied to a rule in the rule repository triggers an automated gap-analysis that measures the quality of the rules in comparison to the original analyzers.\n1. **Analyzer replacement**: Replace at least one SAST tool (in our case flawfinder) with a corresponding rule-set – use the testing framework to ensure that the rule-set is on par with the original SAST tool.\n\nWe unpack each of these challenges in the next section.\n\n### How we approached these challenges\n\nThe architecture of the rule-testing framework is depicted in the code snippets below. All the Semgrep rules and the corresponding test-cases are stored in a central rule repository. 
Changes that are applied to the rules trigger the execution of our rule testing framework that uses the rules and test-cases to perform an automated gap analysis.\n\n\u003Cpre class=\"mermaid\">\nflowchart LR\n  crr[GitLab Rule Repository]\n\n  bandit[\"GitLab bandit\"]\n  bx[\"gl-sast-report.json\"]\n  sbx[\"gl-sast-report.json\"]\n  breport[\"bandit gap analysis report\"]\n\n  subgraph bandit_comparison[\"bandit comparison\"]\n    banditsemgrep[\"GitLab Semgrep\"]\n    banditcompare[\"compare\"]\n    bandit --> |run analyzer on test-cases| bx\n    banditsemgrep --> |run analyzer on test-cases| sbx\n    bx --> banditcompare\n    sbx --> banditcompare\n  end\n  crr -->|bandit rules + rule id mappings| banditsemgrep\n  banditcompare --> breport\n\n  fx[\"gl-sast-report.json\"]\n  fbx[\"gl-sast-report.json\"]\n  freport[\"flawfinder gap analysis report\"]\n  flawfinder[\"GitLab flawfinder\"]\n\n  subgraph flawfinder_comparison[\"flawfinder comparison\"]\n    flawfindersemgrep[\"GitLab Semgrep\"]\n    flawfindercompare[\"compare\"]\n    flawfinder --> |run analyzer on test-cases| fx\n    flawfindersemgrep --> |run analyzer on test-cases| fbx\n    fx --> flawfindercompare\n    fbx --> flawfindercompare\n  end\n  crr -->|flawfinder rules + rule id mappings| flawfindersemgrep\n  flawfindercompare --> freport\n\n\u003C/pre>\n\nThe rule testing framework is a compass that guides us through the rule development process by automatically measuring the efficacy of rules that are stored in the central rule (git) repository. This measurement happens during a comparison step that validates the findings reported by the original analyzer against the corresponding Semgrep rule-set. For the comparisons we cross-validate the SAST\nreports ([`gl-sast-report.json`](https://docs.gitlab.com/ee/user/application_security/sast/)) that adhere to the GitLab security report format. Since the main goal is to achieve parity between the original analyzer and our corresponding Semgrep rules, we treat the original analyzer as the baseline. The code snippet above depicts two example comparison steps for bandit and flawfinder.  The gap analysis is explained in more detail in the \"rule testing\" section below.\n\nUsing a central rule git repository allows us to manage and easily track changes that are applied to rules and their corresponding test-cases in a central location. By means of GitLab CI/CD, we have a mechanism to automatically run tests that enforce constraints and guidelines on the rules and test-cases. Upon rule changes, we automatically trigger the rule-testing framework which enables us to spot gaps in our rules instantly. The structure of the central rule repository is detailed in the \"rule management\" section below.\n\n#### How we addressed rule management challenges\n\nThe central rule repository is used to store, keep track of changes applied to `rules/test-cases` for a variety of different languages. By having a separate rule repository we can add CI jobs to test, verify, and enforce syntax guidelines.\n\nThe structure we use for the central rule repository is depicted below and follows the structure: `\u003Clanguage>/\u003Cruleclass>/{rule-\u003Crulename>.yml, test-\u003Crulename>.*}` where language denotes the target programming language, `\u003Cruleclass>` is a descriptive name for the class of issues the rule aims to detect and `\u003Crulename>` is a descriptive name for the actual rule.  
We can have multiple test cases per rule (all prefixed with `test-`) and rule files `rule-\u003Crulename>.yml` that are prefixed with `rule-` – a rule file contains a single Semgrep rule.\n\n``` bash\n.\n├── mappings\n│   └── analyzer.yml\n├── c\n│   ├── buffer\n│   │   ├── rule-strcpy.yml\n│   │   ├── test-strcpy.c\n│   │   ├── rule-memcpy.yml\n│   │   └── test-memcpy.c\n│   └── ...\n└── javascript\n│   └── ...\n└── python\n│    ├── assert\n│    │   ├── rule-assert.yml\n│    │   └── test-assert.py\n│    └── exec\n│    │   ├── rule-exec.yml\n│    │   ├── test-exec.yml\n│    │   ├── rule-something.yml\n│    │   └── test-something.yml\n│    └── permission\n│    │   ├── rule-chmod.yml\n│    │   └── test-chmod.py\n│    └── ...\n└── ...\n```\n\nIn addition to the rules, we also store mapping files (in the `mappings` subdirectory). The mappings directory in this repository contains YAML configuration/mapping files that map native analyzer IDs to the corresponding Semgrep rules. An analyzer ID uniquely identifies the type of finding. The information in the mapping files helps us to correlate the finding from the original analyzer with their corresponding Semgrep findings and vice versa.\n\nThe mapping files are digested by the testing framework to perform an automated gap analysis. The goal of this analysis is to check if there is an unexpected deviation between Semgrep (with the rules in this repository) and a given analyzer.\n\nA mapping file groups distinct rules into rule-sets and, thus, can be used to bundle different rules based on a certain domain. An excerpt from a mapping file is depicted below – it maps bandit rules (identified by bandit IDs) to Semgrep rules from the central rule repository.\n\n``` yaml\nbandit:\n  - id: \"B101\"\n    rules:\n      - \"python/assert/rule-assert_used\"\n  - id: \"B102\"\n    rules:\n      - \"python/exec/rule-exec_used\"\n  - id: \"B103\"\n    rules:\n      - \"python/file_permissions/rule-general_bad_permission\"\n  - id: \"B104\"\n    rules:\n      - \"python/bind_all_interfaces/rule-general_bindall_interfaces\"\n```\n\n#### How the rule testing framework works\n\nThe test-oracle/baseline is provided by the original analyzer when executed on the test-files. The rules in the central rule repository are compared and evaluated against this baseline. The execution of the testing framework is triggered by any change applied to the rule repository.\n\nWe run all analyzers (flawfinder, bandit, etc.) and their corresponding Semgrep rule-sets (as defined by the mapping files) on the test-files from the GitLab rule repository. The resulting `gl-sast-reports.json` reports that are produced by the original analyzer and by the Semgrep analyzer are then compared in a pairwise manner. To identify identical findings in both reports, we leverage the information from the mapping files that maps the rule-ids of the baseline analyzer to the corresponding Semgrep rule-ids for the rules stored in the central rule repository.\n\nAs output, we produce a gap analysis report (in markdown format). The gap analysis lists all the findings that have been reported by the original analyzers and groups them into different tables (based on the native rule-ids). 
The screenshot below shows a single table from the gap analysis report.\n\n![Gap Analysis Report](https://about.gitlab.com/images/blogimages/testing-framework-report.png){: .shadow.center}\nAn example table from the gap analysis report.\n{: .note.text-center}\n\nThe `X` symbols indicate whether the analyzers (in the example, flawfinder and Semgrep) were able to detect a given finding. The concrete findings as well as the rule files are linked in the table. To reach full coverage, flawfinder as well as Semgrep have to cover the same findings for all the rules that are reported by the baseline.\n\n#### The analyzer replacement\n\nTo build a Semgrep rule-set that is on par with the capabilities of the original/baseline analyzer we leveraged the newly created testing framework. Flawfinder, a C/C++ analyzer, was the first analyzer we fully migrated to Semgrep using the testing framework as a compass.\n\nFirst, we checked the flawfinder implementation to identify the implemented rules. Given that flawfinder is a Python script and that the rules are essentially stored in a dictionary/hash data-structure, we were able to semi-automatically extract the rules and generate the corresponding Semgrep rule files. We were also able to source the test-files from the flawfinder source code repository.\n\nAfter the initial import of the first set of rules-files and test-cases, we used the information provided by the testing-framework to see which rules needed refinement.\n\nWe responded to the information provided by our testing framework in the following way:\n\n1. Findings covered by Baseline and covered by our rule-set: Nothing to be done.\n1. Findings covered by Baseline but not covered by our rule-set: This denotes an incomplete ruleset. In this case we extended the rule-file by providing additional `pattern` entries.\n1. Findings not covered by Baseline but covered by our rule-set: This usually denotes that some rules are too vaguely formulated. 
In this case, we refined our rules by using exclusions, e.g., by using `pattern-not` or by adding more detail to an already existing pattern.\n\nThe rule design was an iterative process where we closed the gaps between our semgrep rule-set and the flawfinder baseline in an iterative manner using the testing framework as an oracle to ultimately achieve 100% parity.\n\n## How the GSoC project helped GitLab\n\nIn this GSoC project we successfully built an automated rule/configuration testing framework that is driven by GitLab CI/CD capabilities and that provided the data we needed to replace flawfinder reliably and quickly with a corresponding Semgrep rule-set.\n\nIf you are interested in finding out more information about this GSoC project, please check out the following repositories:\n\n- [Central Rule Repository](https://gitlab.com/gitlab-org/secure/gsoc-sast-vulnerability-rules/playground/sast-rules)\n- [Testing Framework](https://gitlab.com/gitlab-org/secure/gsoc-sast-vulnerability-rules/rule-testing-framework/rule-testing)\n- [Gap Analysis Computation Tool](https://gitlab.com/gitlab-org/secure/gsoc-sast-vulnerability-rules/rule-testing-framework/report-diff)\n- [Repository to track gap statistics](https://gitlab.com/gitlab-org/secure/gsoc-sast-vulnerability-rules/rule-testing-framework/rule-testing-stats)\n",[916,9,961],{"slug":1284,"featured":6,"template":690},"write-vulnerability-detection-rules","content:en-us:blog:write-vulnerability-detection-rules.yml","Write Vulnerability Detection Rules","en-us/blog/write-vulnerability-detection-rules.yml","en-us/blog/write-vulnerability-detection-rules",{"_path":1290,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1291,"content":1297,"config":1304,"_id":1306,"_type":14,"title":1307,"_source":16,"_file":1308,"_stem":1309,"_extension":19},"/en-us/blog/100-runners-in-less-than-10mins-and-less-than-10-clicks",{"title":1292,"description":1293,"ogTitle":1292,"ogDescription":1293,"noIndex":6,"ogImage":1294,"ogUrl":1295,"ogSiteName":673,"ogType":674,"canonicalUrls":1295,"schema":1296},"Setting up 100 AWS Graviton Spot Runners for GitLab","Utilizing the GitLab HA Scaling Runner Vending Machine for AWS Automation to setup 100 GitLab runners on AWS Spot.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749669882/Blog/Hero%20Images/hundredgitlabspotrunner.png","https://about.gitlab.com/blog/100-runners-in-less-than-10mins-and-less-than-10-clicks","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to provision 100 AWS Graviton GitLab Spot Runners in 10 Minutes for $2/hour\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Darwin Sanoy\"},{\"@type\":\"Person\",\"name\":\"Nupur Sharma\"}],\n        \"datePublished\": \"2021-08-17\",\n      }",{"title":1298,"description":1293,"authors":1299,"heroImage":1294,"date":1301,"body":1302,"category":708,"tags":1303},"How to provision 100 AWS Graviton GitLab Spot Runners in 10 Minutes for $2/hour",[705,1300],"Nupur Sharma","2021-08-17","\n\nManaging elastically scaled or highly available compute infrastructures is one of the key challenges the cloud was built for. Application scaling concerns can be handled by cloud services that are purpose designed, rigorously tested, and continually improved. This article dives into some specific enablement automation that brings the benefits of AWS Autoscaling Groups (ASG) to runner management. 
There are benefits to both the largest fleets and single-instance runners.\n\nEmbedded in this article is a YouTube video that demonstrates the deployment of 100 GitLab runners on Amazon EC2 Spot compute in less than 10 minutes using less than 10 clicks. The video also shows updating this entire fleet in under 10 minutes to emphasize the time savings of built-in maintenance.\n\nThe information and automation in this article apply to GitLab Private Runners, which are deployed on your own compute resources. Self-managed GitLab instances require private runners, but private runners can also be configured and used with GitLab.com SaaS accounts.\n\n## Well-architected runner management\n\nThere are many different reasons that a customer might need to deploy multiple runners with various characteristics. Some of the more popular ones are:\n\n- Workloads that require large-scale runner fleets.\n- To gain cost savings through Spot compute, uptime scheduling, and ARM architecture.\n- Projects with a high demand for CI activity, to make sure that the runner is not being held up by jobs on another project.\n- Jobs that have special security requirements, e.g., security credentials, role-based access, or managed identities for Continuous Delivery (CD). These security requirements can enable instance-level (AWS IAM Instance Profile) security by allowing runners with sufficient rights to deploy in specific target environments. For example, a CD runner for non-production environments and a different runner for production.\n- Implementing role-based access control rather than user-based. This means users don't have to use secrets to manage security requirements for CI jobs to accomplish their tasks.\n- By leveraging a common IAM role, development teams can be confident the runner has the same CI and CD automation capabilities they test through their interactive logins.\n\n### The challenges of building production-grade elastic GitLab Runners\n\n[The GitLab Runner](https://docs.gitlab.com/runner/) is the workhorse of GitLab CI and CD capabilities. The runner can handle numerous operating environments and automation functions for a GitLab instance. The GitLab Runner has become very sophisticated due to the broad range of supported environments. In order to successfully configure the GitLab Runner as a set-it-and-forget-it service, the user has to work through many different decisions and considerations. We summarize some of the GitLab Runner-specific considerations that can be challenging:\n\n- There are a lot of configuration options and scenarios to sort through. It can be an iterative process to discover what needs to be done to set up GitLab Runners.\n- Ensuring runners are a production-grade capability requires Infrastructure as Code (IaC) development so that high availability and scaling can be achieved by automatically spawning new instances.\n- Ensuring that runner deregistration happens correctly when GitLab Runners are automatically scaled in.\n- Additional cost-saving configurations, such as Spot compute and scheduled runner uptime, can complicate the automation requirements for AWS Autoscaling Groups (ASGs).\n- Large organizations often want developers to be able to easily self-service deploy runners with various configurations. Service Management Automation (SMA) has been made popular with products like ServiceNow, AWS Service Catalog, and AWS Control Tower. 
This automation is compatible with SMA.\n- It can be difficult to map runners to AWS and map AWS to runners in large organizations with numerous runners and AWS accounts.\n\n### Introducing the GitLab HA Scaling Runner Vending Machine for AWS\n\nAn effective way to handle multiple design considerations is to make a reusable tool. To help you with best practice runner deployments on AWS, we created the [GitLab HA Scaling Runner Vending Machine for AWS](https://gitlab.com/guided-explorations/aws/gitlab-runner-autoscaling-aws-asg/) (\"The GitLab Runner Vending Machine\"). It is created in AWS’ Infrastructure as Code, known as CloudFormation.\n\n> **Designed with AWS Well Architected:** This automation has many features beyond the scope of this blog post. The primary focus of this blog post is on managing costs. See the [full list of features here](https://gitlab.com/guided-explorations/aws/gitlab-runner-autoscaling-aws-asg/-/blob/main/FEATURES.md).\n\nThe GitLab Runner Vending Machine has the following cost management and scaling management benefits, exposed as a variety of parameters:\n\n- The ability to leverage Spot compute instances. This is important because it leaves CI/CD pipeline developers in charge of whether specific Gitlab CI/CD jobs run on Spot compute or not.\n- ASG-scheduled scaling so that a runner or runner fleet can be completely shutdown when not in use.\n- The GitLab Runner Vending Machine can leverage ARM compute for Linux - which runs faster and costs less.\n- It can also use ASG to update all runners in a fleet with the latest machine images and GitLab Runner version (or a specific version). When maintenance is not built-in, the labor cost of keeping things up-to-date can be significant.\n- Runner naming and tagging in AWS and GitLab, which eases the burden of locating runner instances and managing orphaned runners registrations, whether it is manual or automated.\n\n### How to save money with The GitLab Runner Vending Machine\n\nSignificant savings are possible with this IaC, whether your team wants to save on a single runner or a fleet of them.\n\nThe savings calculations below are for a single runner and should be linear for a given workload. To calculate your savings for more runners, simply multiply the final result by the number of runner instances. The available \"Runner Minutes\" per hour is calculated as the runner's job concurrency setting multiplied by the minutes in an hour. For this exercise, we'll use job concurrency of \"10\". This number should be changed depending on the instance types you are using and the load testing of your typical CI/CD workloads.\n\nJust like most performance analysis, we are assuming that hardware resource utilization is optimal and consistent. If a runner cluster can sustain respectable performance with 80% CPU loading, this calculation assumes that would be maintained regardless of the size of the cluster.\n\n#### AWS Graviton ARM and Spot savings\n\nThe GitLab Runner engineering team has completed performance testing that demonstrates performance gains of more than 30% on some AWS Graviton (ARM-based) instance types. Assuming that runners are performance-managed for optimized utilization, this gain is a direct cost savings. 
Just recently, we shared [how deploying GitLab on Arm-based AWS Graviton2 resulted in cost savings of 23% and 36% performance gains](/blog/achieving-23-cost-savings-and-36-performance-gain-using-gitlab-and-gitlab-runner-on-arm-neoverse-based-aws-graviton2-processor/).\n\n![ARM Efficiency Test Results For GitLab Runner](https://about.gitlab.com/images/blogimages/hundred-runners/hundredrunners-image1.png)\nGitLab Runner testing results for ARM-efficiency gains.\n{: .note.text-center}\n\n#### Scheduling savings\n\nThe savings can be dramatic when teams are able to turn off runners when not in use. For instance: Scheduling a runner to operate for 40 hours per week saves 76% compared to the cost of running it for all 168 hours in the week. Runners that are in use for just 10 hours per week save 94%.\n\n#### Combining scheduling, Spot, and ARM to save 97%\n\nJust for fun, let's see what savings are possible by comparing a standard runner scenario – runners deployed on customized, stand-alone instances – to the maximum savings the automation can deliver.\n\nImagine I am a developer who set up a custom GitLab Runner on an m5.xlarge instance, which uses the x86 architecture, for a development team that works 40 hours per week in the same time zone. Since there is no automation, the GitLab Runner runs 24/7. We will assume a job concurrency of 10, which gives 600 \"runner minutes\" per hour of run time. Scheduling uptime, running on Spot, and leveraging ARM can all be achieved quickly by redeploying the runner with The GitLab Runner Vending Machine.\n\nHere is the calculation to run the configuration described above, for one week: On Demand, x86, Always On: 1 x m5.xlarge = $0.192/hr x 168 hrs/week = **$32/week or $1664/year**\n\nHere are the savings that come from running Spot, ARM, and scheduling the Runner to be up just 40 hrs/week: 1 x m6g.large Spot = $0.0419/hr x 40 hrs/week x 64% (36% better performance) = **$1/week**\n\n$1/$32 x 100 = 3.125% of the original cost for the same work. In other words, **we just saved 97%** without ever impacting the ability to get the job done.\n\nIn short, The GitLab Runner Vending Machine intends to bring the many cost-saving mechanisms of AWS Cloud computing to your GitLab Runner fleets.\n\nYou can save costs by using ARM/Graviton instances, Spot compute, or by scheduling uptime. In many cases, you can combine all three savings mechanisms for maximum impact.\n\n### Special pipeline building concerns for Spot Runners\n\nSpot instances can disappear with as little as two minutes of warning. This inevitably means some runners will be terminated while jobs are still in progress. CI/CD pipeline developers must take into account whether a job ought to run on compute resources that can disappear with short notice (so short as to be considered \"no notice\"). This comes down to deciding which jobs are OK to run on Spot and which jobs should instead run on AWS' persistent compute, known as \"On-Demand\".\n\nThe GitLab Runner Vending Machine accounts for these constraints by tagging runner instances in GitLab with `computetype-spot` or `computetype-ondemand` – indicating in the \"tags\" segment of GitLab CI/CD jobs if a job should run on Spot compute.\n\nSome types of CI workloads, e.g., mass performance testing or large unit testing suites, may already have work queues and work tracking that make them ideal for Spot compute. Other activities, e.g., polling another system for a deployment status, could suffer a material discrepancy if terminated prematurely. 
Others, such as building the application, are sort of in the middle. Usually, restarting the build is sufficient.\n\n### Job configuration for Spot\n\nIf you need to reschedule terminated work, it is helpful to configure GitLab’s job `retry:` keyword. When working with a dispatching engine or work queue that automatically accounts for work left incomplete by processing agents, the retry configuration is unnecessary.\n\nHere is an example that implements both of these concepts:\n\n```yaml\nmy-scaled-test-suite:\n  parallel: 100\n  tags:\n  - computetype-spot\n  retry:\n    max: 2\n    when:\n      - runner_system_failure\n      - unknown_failure\n```\n\nThe usage and limitations of `retry:` are discussed in greater detail in the [GitLab CI documentation on retry](https://docs.gitlab.com/ee/ci/yaml/#retry).\n\n### How to get started\n\nThe CloudFormation templates for the [GitLab Runner Vending Machine are managed in a public project on GitLab.com](https://gitlab.com/guided-explorations/aws/gitlab-runner-autoscaling-aws-asg/). There is a lot of information in the project about how the solution works and what problems it aims to solve, and it will be useful for very experienced AWS builders.\n\nBut to keep it simple for users who want the quickest path to creating runners of all sizes, the project also has an \"easy button\" page with a table that looks like this:\n\n![Easy Button Page Sample](https://about.gitlab.com/images/blogimages/hundred-runners/hundredrunners-image2.png)\nThe easy buttons launch a CloudFormation Quick Create that only requires filling in a few fields.\n{: .note.text-center}\n\nKeep in mind that easy buttons intentionally hide the high degree of customization that is possible with this automation by setting the parameters for the most common scenarios in advance. Advanced AWS users should read more of the documentation in the repository to understand that the GitLab Runner Vending Machine is also capable of creating sophisticated runner fleets.\n\nFirst, click the CloudFormation icons to launch the Easy Button template directly into the CloudFormation Quick Create console. The Quick Create console is designed for simplicity to enable you to complete the prompts and then click one button to launch the stack.\n\n![CloudFormation Quick Create Example](https://about.gitlab.com/images/blogimages/hundred-runners/hundredrunners-image3.png){: .shadow.medium.center}\nThis is a typical Quick Create form for the GitLab Vending Machine easy buttons.\n{: .note.text-center}\n\nNext, select the deploy region by using the drop-down menu in the upper right of the console (where the screenshot says \"Oregon\").\n\nIn most cases, you will only need to add your GitLab instance URL (GitLab.com is fine if that is where your repositories are) and the runner token, which you retrieve from the group or project you wish to attach the runners to. If you are registering against a self-managed instance, you can use the instance-level tokens from the administrator console to register the runner for use across the entire instance. See the [instructions for finding Runner Registration Tokens](https://docs.gitlab.com/runner/register/#requirements).\n\nA few other customization parameters are available for your convenience.\n\nNote that the automation attempts to use the default VPC of the region in which you deploy and the default security group for the VPC. In some organizations, default VPCs and/or their security groups are locked. 
You can deploy to custom VPCs by using the full template instead of an easy button. On the easy button page, look for the footnote \"Not any easy button person?\" to find a link to the full template.\n\nWatch the video below to see the provisioning of 100 GitLab Spot Runners on AWS in less than 10 minutes and in less than 10 clicks for just $5 per hour.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/EW4RJv5zW4U\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nCheck out the YouTube playlist for more relevant videos about [GitLab and AWS](https://youtube.com/playlist?list=PL05JrBw4t0Ko30Bkf8bAvR-8E441Fy2G9).\n\n### This automation does much, much more\n\nWhile this article focused on how much you can save by using Spot for scaled runners, the underlying automation is capable of many other scenarios. Below is a summary of the additional features and benefits covered in the documentation.\n\n- Scaled runners that are persistent (not Spot) ([see more easy buttons here](https://gitlab.com/guided-explorations/aws/gitlab-runner-autoscaling-aws-asg/-/blob/main/easybuttons.md)).\n- Supports small, single-runner setups and scaled ones.\n- Supports GitLab.com SaaS or self-managed instances.\n- Automates OS patching and Runner version upgrading.\n- Supports Windows and Linux.\n- Can be reused with Amazon provisioning services such as Service Catalog and Control Tower.\n- Implements least privilege security throughout.\n- Supports deregistering runners on scale-in or Spot termination.\n\nA full feature list is in the document [Features of GitLab HA Scaling Runner Vending Machine for AWS](https://gitlab.com/guided-explorations/aws/gitlab-runner-autoscaling-aws-asg/-/blob/main/FEATURES.md).\n\n### Easy running\n\nWe hope that this automation will make deployment of runners of all sizes simple for you. We are open to your feedback, suggestions, and contributions in the GitLab project.\n",[732,733,710,9],{"slug":1305,"featured":6,"template":690},"100-runners-in-less-than-10mins-and-less-than-10-clicks","content:en-us:blog:100-runners-in-less-than-10mins-and-less-than-10-clicks.yml","100 Runners In Less Than 10mins And Less Than 10 Clicks","en-us/blog/100-runners-in-less-than-10mins-and-less-than-10-clicks.yml","en-us/blog/100-runners-in-less-than-10mins-and-less-than-10-clicks",4,[666,695,717,740,761,781,804,824,844],1754336031893]