[{"data":1,"prerenderedAt":1127},["ShallowReactive",2],{"/en-us/blog/tags/solutions-architecture/":3,"navigation-en-us":20,"banner-en-us":438,"footer-en-us":453,"solutions architecture-tag-page-en-us":664},{"_path":4,"_dir":5,"_draft":6,"_partial":6,"_locale":7,"content":8,"config":11,"_id":13,"_type":14,"title":15,"_source":16,"_file":17,"_stem":18,"_extension":19},"/en-us/blog/tags/solutions-architecture","tags",false,"",{"tag":9,"tagSlug":10},"solutions architecture","solutions-architecture",{"template":12},"BlogTag","content:en-us:blog:tags:solutions-architecture.yml","yaml","Solutions Architecture","content","en-us/blog/tags/solutions-architecture.yml","en-us/blog/tags/solutions-architecture","yml",{"_path":21,"_dir":22,"_draft":6,"_partial":6,"_locale":7,"data":23,"_id":434,"_type":14,"title":435,"_source":16,"_file":436,"_stem":437,"_extension":19},"/shared/en-us/main-navigation","en-us",{"logo":24,"freeTrial":29,"sales":34,"login":39,"items":44,"search":375,"minimal":406,"duo":425},{"config":25},{"href":26,"dataGaName":27,"dataGaLocation":28},"/","gitlab logo","header",{"text":30,"config":31},"Get free trial",{"href":32,"dataGaName":33,"dataGaLocation":28},"https://gitlab.com/-/trial_registrations/new?glm_source=about.gitlab.com&glm_content=default-saas-trial/","free trial",{"text":35,"config":36},"Talk to sales",{"href":37,"dataGaName":38,"dataGaLocation":28},"/sales/","sales",{"text":40,"config":41},"Sign in",{"href":42,"dataGaName":43,"dataGaLocation":28},"https://gitlab.com/users/sign_in/","sign in",[45,89,185,190,296,356],{"text":46,"config":47,"cards":49,"footer":72},"Platform",{"dataNavLevelOne":48},"platform",[50,56,64],{"title":46,"description":51,"link":52},"The most comprehensive AI-powered DevSecOps Platform",{"text":53,"config":54},"Explore our Platform",{"href":55,"dataGaName":48,"dataGaLocation":28},"/platform/",{"title":57,"description":58,"link":59},"GitLab Duo (AI)","Build software faster with AI at every stage of development",{"text":60,"config":61},"Meet GitLab Duo",{"href":62,"dataGaName":63,"dataGaLocation":28},"/gitlab-duo/","gitlab duo ai",{"title":65,"description":66,"link":67},"Why GitLab","10 reasons why Enterprises choose GitLab",{"text":68,"config":69},"Learn more",{"href":70,"dataGaName":71,"dataGaLocation":28},"/why-gitlab/","why gitlab",{"title":73,"items":74},"Get started with",[75,80,85],{"text":76,"config":77},"Platform Engineering",{"href":78,"dataGaName":79,"dataGaLocation":28},"/solutions/platform-engineering/","platform engineering",{"text":81,"config":82},"Developer Experience",{"href":83,"dataGaName":84,"dataGaLocation":28},"/developer-experience/","Developer experience",{"text":86,"config":87},"MLOps",{"href":88,"dataGaName":86,"dataGaLocation":28},"/topics/devops/the-role-of-ai-in-devops/",{"text":90,"left":91,"config":92,"link":94,"lists":98,"footer":167},"Product",true,{"dataNavLevelOne":93},"solutions",{"text":95,"config":96},"View all Solutions",{"href":97,"dataGaName":93,"dataGaLocation":28},"/solutions/",[99,124,146],{"title":100,"description":101,"link":102,"items":107},"Automation","CI/CD and automation to accelerate deployment",{"config":103},{"icon":104,"href":105,"dataGaName":106,"dataGaLocation":28},"AutomatedCodeAlt","/solutions/delivery-automation/","automated software delivery",[108,112,116,120],{"text":109,"config":110},"CI/CD",{"href":111,"dataGaLocation":28,"dataGaName":109},"/solutions/continuous-integration/",{"text":113,"config":114},"AI-Assisted Development",{"href":62,"dataGaLocation":28,"dataGaName":115},"AI 
assisted development",{"text":117,"config":118},"Source Code Management",{"href":119,"dataGaLocation":28,"dataGaName":117},"/solutions/source-code-management/",{"text":121,"config":122},"Automated Software Delivery",{"href":105,"dataGaLocation":28,"dataGaName":123},"Automated software delivery",{"title":125,"description":126,"link":127,"items":132},"Security","Deliver code faster without compromising security",{"config":128},{"href":129,"dataGaName":130,"dataGaLocation":28,"icon":131},"/solutions/security-compliance/","security and compliance","ShieldCheckLight",[133,136,141],{"text":134,"config":135},"Security & Compliance",{"href":129,"dataGaLocation":28,"dataGaName":134},{"text":137,"config":138},"Software Supply Chain Security",{"href":139,"dataGaLocation":28,"dataGaName":140},"/solutions/supply-chain/","Software supply chain security",{"text":142,"config":143},"Compliance & Governance",{"href":144,"dataGaLocation":28,"dataGaName":145},"/solutions/continuous-software-compliance/","Compliance and governance",{"title":147,"link":148,"items":153},"Measurement",{"config":149},{"icon":150,"href":151,"dataGaName":152,"dataGaLocation":28},"DigitalTransformation","/solutions/visibility-measurement/","visibility and measurement",[154,158,162],{"text":155,"config":156},"Visibility & Measurement",{"href":151,"dataGaLocation":28,"dataGaName":157},"Visibility and Measurement",{"text":159,"config":160},"Value Stream Management",{"href":161,"dataGaLocation":28,"dataGaName":159},"/solutions/value-stream-management/",{"text":163,"config":164},"Analytics & Insights",{"href":165,"dataGaLocation":28,"dataGaName":166},"/solutions/analytics-and-insights/","Analytics and insights",{"title":168,"items":169},"GitLab for",[170,175,180],{"text":171,"config":172},"Enterprise",{"href":173,"dataGaLocation":28,"dataGaName":174},"/enterprise/","enterprise",{"text":176,"config":177},"Small Business",{"href":178,"dataGaLocation":28,"dataGaName":179},"/small-business/","small business",{"text":181,"config":182},"Public Sector",{"href":183,"dataGaLocation":28,"dataGaName":184},"/solutions/public-sector/","public sector",{"text":186,"config":187},"Pricing",{"href":188,"dataGaName":189,"dataGaLocation":28,"dataNavLevelOne":189},"/pricing/","pricing",{"text":191,"config":192,"link":194,"lists":198,"feature":283},"Resources",{"dataNavLevelOne":193},"resources",{"text":195,"config":196},"View all resources",{"href":197,"dataGaName":193,"dataGaLocation":28},"/resources/",[199,232,255],{"title":200,"items":201},"Getting started",[202,207,212,217,222,227],{"text":203,"config":204},"Install",{"href":205,"dataGaName":206,"dataGaLocation":28},"/install/","install",{"text":208,"config":209},"Quick start guides",{"href":210,"dataGaName":211,"dataGaLocation":28},"/get-started/","quick setup checklists",{"text":213,"config":214},"Learn",{"href":215,"dataGaLocation":28,"dataGaName":216},"https://university.gitlab.com/","learn",{"text":218,"config":219},"Product documentation",{"href":220,"dataGaName":221,"dataGaLocation":28},"https://docs.gitlab.com/","product documentation",{"text":223,"config":224},"Best practice videos",{"href":225,"dataGaName":226,"dataGaLocation":28},"/getting-started-videos/","best practice videos",{"text":228,"config":229},"Integrations",{"href":230,"dataGaName":231,"dataGaLocation":28},"/integrations/","integrations",{"title":233,"items":234},"Discover",[235,240,245,250],{"text":236,"config":237},"Customer success stories",{"href":238,"dataGaName":239,"dataGaLocation":28},"/customers/","customer success 
stories",{"text":241,"config":242},"Blog",{"href":243,"dataGaName":244,"dataGaLocation":28},"/blog/","blog",{"text":246,"config":247},"Remote",{"href":248,"dataGaName":249,"dataGaLocation":28},"https://handbook.gitlab.com/handbook/company/culture/all-remote/","remote",{"text":251,"config":252},"TeamOps",{"href":253,"dataGaName":254,"dataGaLocation":28},"/teamops/","teamops",{"title":256,"items":257},"Connect",[258,263,268,273,278],{"text":259,"config":260},"GitLab Services",{"href":261,"dataGaName":262,"dataGaLocation":28},"/services/","services",{"text":264,"config":265},"Community",{"href":266,"dataGaName":267,"dataGaLocation":28},"/community/","community",{"text":269,"config":270},"Forum",{"href":271,"dataGaName":272,"dataGaLocation":28},"https://forum.gitlab.com/","forum",{"text":274,"config":275},"Events",{"href":276,"dataGaName":277,"dataGaLocation":28},"/events/","events",{"text":279,"config":280},"Partners",{"href":281,"dataGaName":282,"dataGaLocation":28},"/partners/","partners",{"backgroundColor":284,"textColor":285,"text":286,"image":287,"link":291},"#2f2a6b","#fff","Insights for the future of software development",{"altText":288,"config":289},"the source promo card",{"src":290},"/images/navigation/the-source-promo-card.svg",{"text":292,"config":293},"Read the latest",{"href":294,"dataGaName":295,"dataGaLocation":28},"/the-source/","the source",{"text":297,"config":298,"lists":300},"Company",{"dataNavLevelOne":299},"company",[301],{"items":302},[303,308,314,316,321,326,331,336,341,346,351],{"text":304,"config":305},"About",{"href":306,"dataGaName":307,"dataGaLocation":28},"/company/","about",{"text":309,"config":310,"footerGa":313},"Jobs",{"href":311,"dataGaName":312,"dataGaLocation":28},"/jobs/","jobs",{"dataGaName":312},{"text":274,"config":315},{"href":276,"dataGaName":277,"dataGaLocation":28},{"text":317,"config":318},"Leadership",{"href":319,"dataGaName":320,"dataGaLocation":28},"/company/team/e-group/","leadership",{"text":322,"config":323},"Team",{"href":324,"dataGaName":325,"dataGaLocation":28},"/company/team/","team",{"text":327,"config":328},"Handbook",{"href":329,"dataGaName":330,"dataGaLocation":28},"https://handbook.gitlab.com/","handbook",{"text":332,"config":333},"Investor relations",{"href":334,"dataGaName":335,"dataGaLocation":28},"https://ir.gitlab.com/","investor relations",{"text":337,"config":338},"Trust Center",{"href":339,"dataGaName":340,"dataGaLocation":28},"/security/","trust center",{"text":342,"config":343},"AI Transparency Center",{"href":344,"dataGaName":345,"dataGaLocation":28},"/ai-transparency-center/","ai transparency center",{"text":347,"config":348},"Newsletter",{"href":349,"dataGaName":350,"dataGaLocation":28},"/company/contact/","newsletter",{"text":352,"config":353},"Press",{"href":354,"dataGaName":355,"dataGaLocation":28},"/press/","press",{"text":357,"config":358,"lists":359},"Contact us",{"dataNavLevelOne":299},[360],{"items":361},[362,365,370],{"text":35,"config":363},{"href":37,"dataGaName":364,"dataGaLocation":28},"talk to sales",{"text":366,"config":367},"Get help",{"href":368,"dataGaName":369,"dataGaLocation":28},"/support/","get help",{"text":371,"config":372},"Customer portal",{"href":373,"dataGaName":374,"dataGaLocation":28},"https://customers.gitlab.com/customers/sign_in/","customer portal",{"close":376,"login":377,"suggestions":384},"Close",{"text":378,"link":379},"To search repositories and projects, login to",{"text":380,"config":381},"gitlab.com",{"href":42,"dataGaName":382,"dataGaLocation":383},"search 
login","search",{"text":385,"default":386},"Suggestions",[387,389,393,395,399,403],{"text":57,"config":388},{"href":62,"dataGaName":57,"dataGaLocation":383},{"text":390,"config":391},"Code Suggestions (AI)",{"href":392,"dataGaName":390,"dataGaLocation":383},"/solutions/code-suggestions/",{"text":109,"config":394},{"href":111,"dataGaName":109,"dataGaLocation":383},{"text":396,"config":397},"GitLab on AWS",{"href":398,"dataGaName":396,"dataGaLocation":383},"/partners/technology-partners/aws/",{"text":400,"config":401},"GitLab on Google Cloud",{"href":402,"dataGaName":400,"dataGaLocation":383},"/partners/technology-partners/google-cloud-platform/",{"text":404,"config":405},"Why GitLab?",{"href":70,"dataGaName":404,"dataGaLocation":383},{"freeTrial":407,"mobileIcon":412,"desktopIcon":417,"secondaryButton":420},{"text":408,"config":409},"Start free trial",{"href":410,"dataGaName":33,"dataGaLocation":411},"https://gitlab.com/-/trials/new/","nav",{"altText":413,"config":414},"Gitlab Icon",{"src":415,"dataGaName":416,"dataGaLocation":411},"/images/brand/gitlab-logo-tanuki.svg","gitlab icon",{"altText":413,"config":418},{"src":419,"dataGaName":416,"dataGaLocation":411},"/images/brand/gitlab-logo-type.svg",{"text":421,"config":422},"Get Started",{"href":423,"dataGaName":424,"dataGaLocation":411},"https://gitlab.com/-/trial_registrations/new?glm_source=about.gitlab.com/compare/gitlab-vs-github/","get started",{"freeTrial":426,"mobileIcon":430,"desktopIcon":432},{"text":427,"config":428},"Learn more about GitLab Duo",{"href":62,"dataGaName":429,"dataGaLocation":411},"gitlab duo",{"altText":413,"config":431},{"src":415,"dataGaName":416,"dataGaLocation":411},{"altText":413,"config":433},{"src":419,"dataGaName":416,"dataGaLocation":411},"content:shared:en-us:main-navigation.yml","Main Navigation","shared/en-us/main-navigation.yml","shared/en-us/main-navigation",{"_path":439,"_dir":22,"_draft":6,"_partial":6,"_locale":7,"title":440,"button":441,"image":445,"config":448,"_id":450,"_type":14,"_source":16,"_file":451,"_stem":452,"_extension":19},"/shared/en-us/banner","is now in public beta!",{"text":68,"config":442},{"href":443,"dataGaName":444,"dataGaLocation":28},"/gitlab-duo/agent-platform/","duo banner",{"config":446},{"src":447},"https://res.cloudinary.com/about-gitlab-com/image/upload/v1753720689/somrf9zaunk0xlt7ne4x.svg",{"layout":449},"release","content:shared:en-us:banner.yml","shared/en-us/banner.yml","shared/en-us/banner",{"_path":454,"_dir":22,"_draft":6,"_partial":6,"_locale":7,"data":455,"_id":660,"_type":14,"title":661,"_source":16,"_file":662,"_stem":663,"_extension":19},"/shared/en-us/main-footer",{"text":456,"source":457,"edit":463,"contribute":468,"config":473,"items":478,"minimal":652},"Git is a trademark of Software Freedom Conservancy and our use of 'GitLab' is under license",{"text":458,"config":459},"View page source",{"href":460,"dataGaName":461,"dataGaLocation":462},"https://gitlab.com/gitlab-com/marketing/digital-experience/about-gitlab-com/","page source","footer",{"text":464,"config":465},"Edit this page",{"href":466,"dataGaName":467,"dataGaLocation":462},"https://gitlab.com/gitlab-com/marketing/digital-experience/about-gitlab-com/-/blob/main/content/","web ide",{"text":469,"config":470},"Please contribute",{"href":471,"dataGaName":472,"dataGaLocation":462},"https://gitlab.com/gitlab-com/marketing/digital-experience/about-gitlab-com/-/blob/main/CONTRIBUTING.md/","please 
contribute",{"twitter":474,"facebook":475,"youtube":476,"linkedin":477},"https://twitter.com/gitlab","https://www.facebook.com/gitlab","https://www.youtube.com/channel/UCnMGQ8QHMAnVIsI3xJrihhg","https://www.linkedin.com/company/gitlab-com",[479,502,559,588,622],{"title":46,"links":480,"subMenu":485},[481],{"text":482,"config":483},"DevSecOps platform",{"href":55,"dataGaName":484,"dataGaLocation":462},"devsecops platform",[486],{"title":186,"links":487},[488,492,497],{"text":489,"config":490},"View plans",{"href":188,"dataGaName":491,"dataGaLocation":462},"view plans",{"text":493,"config":494},"Why Premium?",{"href":495,"dataGaName":496,"dataGaLocation":462},"/pricing/premium/","why premium",{"text":498,"config":499},"Why Ultimate?",{"href":500,"dataGaName":501,"dataGaLocation":462},"/pricing/ultimate/","why ultimate",{"title":503,"links":504},"Solutions",[505,510,513,515,520,525,529,532,536,541,543,546,549,554],{"text":506,"config":507},"Digital transformation",{"href":508,"dataGaName":509,"dataGaLocation":462},"/topics/digital-transformation/","digital transformation",{"text":134,"config":511},{"href":129,"dataGaName":512,"dataGaLocation":462},"security & compliance",{"text":123,"config":514},{"href":105,"dataGaName":106,"dataGaLocation":462},{"text":516,"config":517},"Agile development",{"href":518,"dataGaName":519,"dataGaLocation":462},"/solutions/agile-delivery/","agile delivery",{"text":521,"config":522},"Cloud transformation",{"href":523,"dataGaName":524,"dataGaLocation":462},"/topics/cloud-native/","cloud transformation",{"text":526,"config":527},"SCM",{"href":119,"dataGaName":528,"dataGaLocation":462},"source code management",{"text":109,"config":530},{"href":111,"dataGaName":531,"dataGaLocation":462},"continuous integration & delivery",{"text":533,"config":534},"Value stream management",{"href":161,"dataGaName":535,"dataGaLocation":462},"value stream management",{"text":537,"config":538},"GitOps",{"href":539,"dataGaName":540,"dataGaLocation":462},"/solutions/gitops/","gitops",{"text":171,"config":542},{"href":173,"dataGaName":174,"dataGaLocation":462},{"text":544,"config":545},"Small business",{"href":178,"dataGaName":179,"dataGaLocation":462},{"text":547,"config":548},"Public sector",{"href":183,"dataGaName":184,"dataGaLocation":462},{"text":550,"config":551},"Education",{"href":552,"dataGaName":553,"dataGaLocation":462},"/solutions/education/","education",{"text":555,"config":556},"Financial services",{"href":557,"dataGaName":558,"dataGaLocation":462},"/solutions/finance/","financial 
services",{"title":191,"links":560},[561,563,565,567,570,572,574,576,578,580,582,584,586],{"text":203,"config":562},{"href":205,"dataGaName":206,"dataGaLocation":462},{"text":208,"config":564},{"href":210,"dataGaName":211,"dataGaLocation":462},{"text":213,"config":566},{"href":215,"dataGaName":216,"dataGaLocation":462},{"text":218,"config":568},{"href":220,"dataGaName":569,"dataGaLocation":462},"docs",{"text":241,"config":571},{"href":243,"dataGaName":244,"dataGaLocation":462},{"text":236,"config":573},{"href":238,"dataGaName":239,"dataGaLocation":462},{"text":246,"config":575},{"href":248,"dataGaName":249,"dataGaLocation":462},{"text":259,"config":577},{"href":261,"dataGaName":262,"dataGaLocation":462},{"text":251,"config":579},{"href":253,"dataGaName":254,"dataGaLocation":462},{"text":264,"config":581},{"href":266,"dataGaName":267,"dataGaLocation":462},{"text":269,"config":583},{"href":271,"dataGaName":272,"dataGaLocation":462},{"text":274,"config":585},{"href":276,"dataGaName":277,"dataGaLocation":462},{"text":279,"config":587},{"href":281,"dataGaName":282,"dataGaLocation":462},{"title":297,"links":589},[590,592,594,596,598,600,602,606,611,613,615,617],{"text":304,"config":591},{"href":306,"dataGaName":299,"dataGaLocation":462},{"text":309,"config":593},{"href":311,"dataGaName":312,"dataGaLocation":462},{"text":317,"config":595},{"href":319,"dataGaName":320,"dataGaLocation":462},{"text":322,"config":597},{"href":324,"dataGaName":325,"dataGaLocation":462},{"text":327,"config":599},{"href":329,"dataGaName":330,"dataGaLocation":462},{"text":332,"config":601},{"href":334,"dataGaName":335,"dataGaLocation":462},{"text":603,"config":604},"Sustainability",{"href":605,"dataGaName":603,"dataGaLocation":462},"/sustainability/",{"text":607,"config":608},"Diversity, inclusion and belonging (DIB)",{"href":609,"dataGaName":610,"dataGaLocation":462},"/diversity-inclusion-belonging/","Diversity, inclusion and belonging",{"text":337,"config":612},{"href":339,"dataGaName":340,"dataGaLocation":462},{"text":347,"config":614},{"href":349,"dataGaName":350,"dataGaLocation":462},{"text":352,"config":616},{"href":354,"dataGaName":355,"dataGaLocation":462},{"text":618,"config":619},"Modern Slavery Transparency Statement",{"href":620,"dataGaName":621,"dataGaLocation":462},"https://handbook.gitlab.com/handbook/legal/modern-slavery-act-transparency-statement/","modern slavery transparency statement",{"title":623,"links":624},"Contact Us",[625,628,630,632,637,642,647],{"text":626,"config":627},"Contact an expert",{"href":37,"dataGaName":38,"dataGaLocation":462},{"text":366,"config":629},{"href":368,"dataGaName":369,"dataGaLocation":462},{"text":371,"config":631},{"href":373,"dataGaName":374,"dataGaLocation":462},{"text":633,"config":634},"Status",{"href":635,"dataGaName":636,"dataGaLocation":462},"https://status.gitlab.com/","status",{"text":638,"config":639},"Terms of use",{"href":640,"dataGaName":641,"dataGaLocation":462},"/terms/","terms of use",{"text":643,"config":644},"Privacy statement",{"href":645,"dataGaName":646,"dataGaLocation":462},"/privacy/","privacy statement",{"text":648,"config":649},"Cookie preferences",{"dataGaName":650,"dataGaLocation":462,"id":651,"isOneTrustButton":91},"cookie 
preferences","ot-sdk-btn",{"items":653},[654,656,658],{"text":638,"config":655},{"href":640,"dataGaName":641,"dataGaLocation":462},{"text":643,"config":657},{"href":645,"dataGaName":646,"dataGaLocation":462},{"text":648,"config":659},{"dataGaName":650,"dataGaLocation":462,"id":651,"isOneTrustButton":91},"content:shared:en-us:main-footer.yml","Main Footer","shared/en-us/main-footer.yml","shared/en-us/main-footer",{"allPosts":665,"featuredPost":1105,"totalPagesCount":1125,"initialPosts":1126},[666,693,715,736,758,779,799,819,840,862,882,903,922,943,962,982,1004,1023,1042,1062,1084],{"_path":667,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":668,"content":676,"config":686,"_id":689,"_type":14,"title":690,"_source":16,"_file":691,"_stem":692,"_extension":19},"/en-us/blog/automating-container-image-migration-from-amazon-ecr-to-gitlab",{"title":669,"description":670,"ogTitle":669,"ogDescription":670,"noIndex":6,"ogImage":671,"ogUrl":672,"ogSiteName":673,"ogType":674,"canonicalUrls":672,"schema":675},"Automating container image migration from Amazon ECR to GitLab","When platform teams move their CI/CD to GitLab, migrating container images shouldn't be the bottleneck. Follow this step-by-step guide to automate the pipeline migration process.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663129/Blog/Hero%20Images/blog-image-template-1800x945__28_.png","https://about.gitlab.com/blog/automating-container-image-migration-from-amazon-ecr-to-gitlab","https://about.gitlab.com","article","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Automating container image migration from Amazon ECR to GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Tim Rizzi\"}],\n        \"datePublished\": \"2025-02-13\",\n      }",{"title":669,"description":670,"authors":677,"heroImage":671,"date":679,"body":680,"category":681,"tags":682},[678],"Tim Rizzi","2025-02-13","\"We need to migrate hundreds of container images from Amazon Elastic Container Registry (ECR) to GitLab. Can you help?\" This question kept coming up in conversations with platform engineers. They were modernizing their DevSecOps toolchain with GitLab but got stuck when faced with moving their container images. While each image transfer is simple, the sheer volume made it daunting.\n\nOne platform engineer perfectly said, \"I know exactly what needs to be done – pull, retag, push. But I have 200 microservices, each with multiple tags. I can't justify spending weeks on this migration when I have critical infrastructure work.\"\n\n## The challenge\n\nThat conversation sparked an idea. What if we could automate the entire process? When platform teams move their [CI/CD](https://about.gitlab.com/topics/ci-cd/) to GitLab, migrating container images shouldn't be the bottleneck. The manual process is straightforward but repetitive – pull each image, retag it, and push it to GitLab's Container Registry. Multiply this by dozens of repositories and multiple tags per image, and you're looking at days or weeks of tedious work.\n\n## The solution\n\nWe set out to create a GitLab pipeline that would automatically do all this heavy lifting. The goal was simple: Give platform engineers a tool they could set up in minutes and let run overnight, waking up to find all their images migrated successfully.\n\n### Setting up access\n\nFirst things first – security. We wanted to ensure teams could run this migration with minimal AWS permissions. 
Here's the read-only identity and access management (IAM) policy you'll need:

```json
{
    "Version": "2012-10-17",
    "Statement": [
        {
            "Effect": "Allow",
            "Action": [
                "ecr:GetAuthorizationToken",
                "ecr:BatchCheckLayerAvailability",
                "ecr:GetDownloadUrlForLayer",
                "ecr:DescribeRepositories",
                "ecr:ListImages",
                "ecr:DescribeImages",
                "ecr:BatchGetImage"
            ],
            "Resource": "*"
        }
    ]
}
```

### GitLab configuration

With security handled, the next step is setting up GitLab. We kept this minimal – you'll need to configure these variables in your CI/CD settings:

```
AWS_ACCOUNT_ID: Your AWS account number
AWS_DEFAULT_REGION: Your ECR region
AWS_ACCESS_KEY_ID: [Masked]
AWS_SECRET_ACCESS_KEY: [Masked]
BULK_MIGRATE: true
```

### The migration pipeline

Now for the interesting part. We built the pipeline using Docker-in-Docker to handle all the image operations reliably:

```yaml
image: docker:20.10
services:
  - docker:20.10-dind

before_script:
  - apk add --no-cache aws-cli jq
  - aws sts get-caller-identity
  # Note: docker login needs the ECR registry host, otherwise it targets Docker Hub.
  - aws ecr get-login-password | docker login --username AWS --password-stdin "${AWS_ACCOUNT_ID}.dkr.ecr.${AWS_DEFAULT_REGION}.amazonaws.com"
  - docker login -u ${CI_REGISTRY_USER} -p ${CI_REGISTRY_PASSWORD} ${CI_REGISTRY}
```

The pipeline works in three phases, each building on the last:

1. Discovery

First, it finds all your repositories:

```bash
REPOS=$(aws ecr describe-repositories --query 'repositories[*].repositoryName' --output text)
```

2. Tag enumeration

Then, for each repository, it gets all the tags:

```bash
TAGS=$(aws ecr describe-images --repository-name $repo --query 'imageDetails[*].imageTags[]' --output text)
```

3. Transfer

Finally, it handles the actual migration:

```bash
docker pull ${AWS_ACCOUNT_ID}.dkr.ecr.${AWS_DEFAULT_REGION}.amazonaws.com/${repo}:${tag}
docker tag ${AWS_ACCOUNT_ID}.dkr.ecr.${AWS_DEFAULT_REGION}.amazonaws.com/${repo}:${tag} ${CI_REGISTRY_IMAGE}/${repo}:${tag}
docker push ${CI_REGISTRY_IMAGE}/${repo}:${tag}
```

## What you get

Remember that platform engineer who didn't want to spend weeks on migration? Here's what this solution delivers:

- Automated discovery and migration of all repositories and tags
- Consistent image naming between ECR and GitLab
- Error handling for failed transfers
- Clear logging for tracking progress

Instead of writing scripts and babysitting the migration, the platform engineer could focus on more valuable work.

## Usage

Getting started is straightforward:

1. Copy the `.gitlab-ci.yml` to your repository.
2. Configure the AWS and GitLab variables.
3. Set `BULK_MIGRATE` to `"true"` to start the migration.

## Best practices

Through helping teams with their migrations, we've learned a few things:

- Run during off-peak hours to minimize the impact on your team.
- Keep an eye on the pipeline logs: they'll tell you if anything needs attention.
- Don't decommission ECR until you've verified all images transferred successfully.
- For very large migrations, consider adding rate limiting to avoid overwhelming your network.

We've open-sourced this pipeline in our public GitLab repository because we believe platform engineers should spend time building valuable infrastructure, not copying container images.
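Stitched together, the three phases might look like the following minimal sketch. This is not the exact open-sourced pipeline, just one plausible shape for it: the job name and the `failed.log` failure report are illustrative, and the variables are the ones configured above.

```yaml
bulk-migrate:
  image: docker:20.10
  services:
    - docker:20.10-dind
  rules:
    - if: '$BULK_MIGRATE == "true"'   # same opt-in switch as the variable above
  before_script:
    - apk add --no-cache aws-cli
    - aws ecr get-login-password | docker login --username AWS --password-stdin "${AWS_ACCOUNT_ID}.dkr.ecr.${AWS_DEFAULT_REGION}.amazonaws.com"
    - docker login -u "${CI_REGISTRY_USER}" -p "${CI_REGISTRY_PASSWORD}" "${CI_REGISTRY}"
  script:
    - |
      ECR="${AWS_ACCOUNT_ID}.dkr.ecr.${AWS_DEFAULT_REGION}.amazonaws.com"
      touch failed.log
      # Phase 1: discover repositories.
      for repo in $(aws ecr describe-repositories --query 'repositories[*].repositoryName' --output text); do
        # Phase 2: enumerate tags per repository.
        for tag in $(aws ecr describe-images --repository-name "$repo" --query 'imageDetails[*].imageTags[]' --output text); do
          echo "Migrating ${repo}:${tag}"
          # Phase 3: pull, retag, push; log failures instead of aborting the whole run.
          if docker pull "${ECR}/${repo}:${tag}" &&
             docker tag "${ECR}/${repo}:${tag}" "${CI_REGISTRY_IMAGE}/${repo}:${tag}" &&
             docker push "${CI_REGISTRY_IMAGE}/${repo}:${tag}"; then
            echo "OK ${repo}:${tag}"
          else
            echo "FAILED ${repo}:${tag}" | tee -a failed.log
          fi
        done
      done
  artifacts:
    when: always
    paths:
      - failed.log
```

Logging failures and continuing, rather than failing fast, is what lets the job run unattended overnight; the failure report tells you what to retry in the morning.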
Feel free to adapt it for your needs or ask questions about implementation.

> #### Get started with this and other package components with our [CI/CD Catalog documentation](https://gitlab.com/explore/catalog/components/package).

---

# Lightning fast code counting for better code management intelligence

*Darwin Sanoy · February 15, 2023 · Tags: DevOps, solutions architecture, CI*

One of the earliest forms of intelligence was to simply answer the question "How many?". Counting is one of the first things that we learn as a child. As we grow older, we come to see this deceptively simple concept as somewhat childish. Yet, upon the concept of counting, the entire discipline of statistics is founded. In turn, every discipline that benefits from statistics owes a debt of gratitude to the very humble concept of counting.

Many of the massive data lakes we keep are essentially vast amounts of counting. Using artificial intelligence to analyze this data, we frequently find insights we were not expecting. So it would seem that counting is somewhat of a fractal concept – it's deceptively simple, but, when compounded, generates delightful things.

So if we have a thing we are trying to be more intelligent about, our first endeavor might be to count it. Let's see how to apply that to our code stored in GitLab.

### Why developers count code

The following list is from real-world scenarios. Many of them are also asserted in Ben Boyter's blog post [Why count lines of code?](https://boyter.org/posts/why-count-lines-of-code/). Their enumeration here is not an endorsement of the validity or accuracy of code counting for the claimed benefit, and the fundamental assumptions of such models are not stated.
Because code counting is essentially a form of modeling, it is also subject to George Box's axiom: "All models are wrong, but some are useful."

- Showing the languages in a repository using an absolute metric like source lines of code helps to quickly assess if one can contribute to the project, given their own talents.
- Cost assessment for anything which charges by "lines of code" (some code scanning and development tools may charge this way).
- Although [research](https://gitlab.com/gitlab-org/gitlab/-/issues/371038) shows that lines of code are not a good metric for measuring contribution, some developers have gotten used to seeing lines of code per contributor.
- Code base shrinkage as a measure of good architecture (simplification).
- Anything where the complexity of code affects project agility and costs – for instance, assessing and reporting status on migrating a code base to a new language.
- Staffing a development team – understanding what language competencies are needed across the team and in what relative proportion to each other, or understanding that for the entire organization's codebase.
- IT tooling decisions to support the needs of an organization, given the most used coding languages across all repositories in the org.
- Assessment of tech debt.

While it is easy to create bad models with any of the above counts, the focus of this post is to get some good counts from which you can carefully build a model.

### Toolsmithing GitLab CI: A working example as a shared CI library

The easiest way to differentiate between a "toolized" and a "templated" solution is that you can simply and easily reuse toolized code exactly as-is, without needing to change it. Many formal coding languages have the concept of shared libraries or dependencies that are essentially toolized. A templated solution consists of a starting point that you customize and then have to manage yourself. Templates can function as scaffolding for an entire project, or as snippets of code that perform a specific function. The fundamental difference is that when you use a template, you end up owning and managing the resultant code going forward.

In [GitLab CI](https://docs.gitlab.com/ee/ci/introduction/index.html), we can create our own tooling or dependencies with a few tricks stolen from [Auto DevOps](https://docs.gitlab.com/ee/topics/autodevops/). These tricks are:

1. Use includes to reference the shared, managed library code (this creates a "dependency" on code that is being managed outside of your own).
2. The includable code must be written like a function, where all needed inputs are either passed in or can be collected from the environment. No hard coding is allowed, because that would create a template that can't be depended upon directly.
3. Use GitLab CI "functions". I am coining this term to indicate that in GitLab you can precede a job name with a "." (dot) and it will not be executed when it is read. You can then create a new job from all the code in the dot-named job, adding variables, by using the `extends:` keyword. By using dot-named jobs in your includable code, the developer consuming the "managed shared CI dependency" can decide when, where, and how to call the toolized code. A minimal sketch of this pattern follows the list.
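To make the dot-job trick concrete, here is a minimal sketch of the pattern. The library project path, file name, and `TARGET_GROUP` variable are all hypothetical, chosen only to show the shape of a shared CI "function" and its call site.

```yaml
# In the shared library project (e.g. my-org/ci-library), file: count-code.gitlab-ci.yml
# The leading dot means this job never runs on its own; it is the "function" definition.
.count_code:
  image: alpine:3.19
  variables:
    TARGET_GROUP: ""            # input parameter; nothing is hard coded
  script:
    - echo "Counting code for GitLab group '${TARGET_GROUP}'"

# In the consuming project's .gitlab-ci.yml: the include creates the dependency,
# and extends "calls" the function with arguments supplied as variables.
include:
  - project: 'my-org/ci-library'
    file: 'count-code.gitlab-ci.yml'

count-my-group:
  extends: .count_code
  variables:
    TARGET_GROUP: "guided-explorations"
```

Because the consuming project never copies the library's script, updates to the shared file flow to every consumer, which is exactly what distinguishes a tool from a template.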
### The result: A code-counting GitLab CI extension

Here are some of the final design attributes of this code-counting solution:

- Is extremely fast for the given task.
- Leverages the Git clone optimization lessons contained in this article: [How much code do I have? A DevSecOps story](https://acloudguru.com/blog/engineering/how-much-code-do-i-have-a-devsecops-story).
- Uses the [lightning fast, open source code counting tool SCC](https://github.com/boyter/scc) by Ben Boyter.
- Is implemented as a reusable GitLab CI shared library extension.
- Allows configuration of the file extensions that should not be checked out because they do not include source code to be counted.
- Leverages the GitLab Run Pipeline forms capability.
- Can enumerate and count an entire group hierarchy in GitLab, or be given a stipulated list.
- Uses the runner token to access and read repositories by default, but can be given a specific token.
- Uploads HTML and text artifacts that contain the code-counting report.
- Purposely emits the code-counting results into CI logs for easy reference.
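The actual extension linked below is more capable than this, but to show the shape of such a job, here is a minimal sketch that shallow-clones a single repository and runs the SCC binary. The repository URL, pinned SCC version, and release asset name are assumptions; check SCC's releases page for the current binary name before using it.

```yaml
count-code:
  image: alpine:3.19
  variables:
    TARGET_REPO: "https://gitlab.com/guided-explorations/code-metrics/ci-cd-extension-scc.git"  # illustrative
  script:
    - apk add --no-cache git curl
    # Download a prebuilt SCC binary so no Golang runtime is needed.
    # Asset naming varies between releases; adjust to the release you pin.
    - curl -sL "https://github.com/boyter/scc/releases/download/v3.3.5/scc_Linux_x86_64.tar.gz" | tar -xz -C /usr/local/bin scc
    # Shallow clone: history is irrelevant when counting only the current code.
    - git clone --depth 1 "${TARGET_REPO}" target
    - scc --format html --output report.html target
    - scc target            # also emit the counts into the CI log for easy reference
  artifacts:
    paths:
      - report.html
```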
### The output

Results are shown below in the CI log, but they are also captured as an HTML artifact.

The clone time is also in the log for each project, so that it can be verified that the cloning optimizations are making a substantial difference.

These particular results are counting all the code in [https://gitlab.com/guided-explorations](https://gitlab.com/guided-explorations).

![codecountingcilog](https://about.gitlab.com/images/blogimages/code-counting-in-gitlab/codecountingcilog.png)

### The code

The code is available in this project: [https://gitlab.com/guided-explorations/code-metrics/ci-cd-extension-scc](https://gitlab.com/guided-explorations/code-metrics/ci-cd-extension-scc). You can view the scanning results in the job logs of past runs here: [https://gitlab.com/guided-explorations/code-metrics/ci-cd-extension-scc/-/pipelines](https://gitlab.com/guided-explorations/code-metrics/ci-cd-extension-scc/-/pipelines).

Rather than fail the entire job when a project fails to clone, the job simply logs the error from the attempted clone. This allows review of valid use cases for not being able to clone, and obtains as complete a picture as possible. The cloning error log is uploaded as a job artifact and emitted to the log.

### Innovation: MR complexity metrics extension

During a customer engagement, I was asked whether there was a way to assess how much change a merge request contained and mark it accordingly. An operations team was missing their SLAs for deployments because the amount of change, and therefore the risk and required review, could be highly variable. Since there was no way to estimate this without human eyes on it, MRs with a high degree of change would overrun their SLA when they couldn't be pre-triaged.

I wondered if I could use the previously built code-counting solution to count diffs, get a rough idea of how much change had occurred in the commits of an MR branch, and then apply labels to MRs to give a rough idea of their degree of change – a sort of proxy for how much review time might be required.

It turned out to be plausible. You can review the [shared library in the Git Diff Revision Activity Metrics CI extension](https://gitlab.com/guided-explorations/code-metrics/git-diff-revision-activity-metrics) and see the results in the MR list of this working example project that uses the code: [MR list for Diff Revision Activity Analytics DEMO](https://gitlab.com/guided-explorations/code-metrics/diff-revision-activity-analytics-demo/-/merge_requests). A simplified sketch of the labeling idea follows.
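The linked extension uses the full code-counting tooling; purely to convey the flavor of the approach, a much simpler job could size an MR by inserted lines and label it through the merge request API. Here, `LABEL_TOKEN` (a CI variable holding a token with API scope), the thresholds, and the `change::*` labels are all assumptions.

```yaml
label-mr-by-change-size:
  image: alpine:3.19
  rules:
    - if: '$CI_PIPELINE_SOURCE == "merge_request_event"'
  variables:
    GIT_DEPTH: "0"              # full history so the diff against the target branch works
  script:
    - apk add --no-cache git curl
    - git fetch origin "${CI_MERGE_REQUEST_TARGET_BRANCH_NAME}"
    - |
      # Rough proxy: count inserted lines between the target branch and the MR head.
      LINES=$(git diff --shortstat "origin/${CI_MERGE_REQUEST_TARGET_BRANCH_NAME}...HEAD" \
        | grep -oE '[0-9]+ insertion' | grep -oE '[0-9]+' || echo 0)
      if [ "${LINES}" -gt 500 ]; then SIZE="change::large"
      elif [ "${LINES}" -gt 100 ]; then SIZE="change::medium"
      else SIZE="change::small"; fi
      # LABEL_TOKEN is an assumed CI variable holding a token that may update MRs.
      curl --request PUT --header "PRIVATE-TOKEN: ${LABEL_TOKEN}" \
        --data-urlencode "add_labels=${SIZE}" \
        "${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/merge_requests/${CI_MERGE_REQUEST_IID}"
```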
### The value of remote work water cooler conversations

I have to let you know why this blog post was written now, when this solution has been around for quite a while. You often hear that working remotely does not allow for water cooler conversations, which, in the story you're told, are where real innovation happens.

Within GitLab's [Remote First culture](/company/culture/all-remote/guide/), it is expected that anyone in the company can schedule a "coffee chat" with anyone else. The cultural expectation is that this is normal and that, unless you are getting an overwhelming number of these requests, you will find time to socially connect when asked.

I received a coffee chat request from [Torsten Linz](https://gitlab.com/tlinz), the Senior Product Manager for the Source Code Management group, to chat about my comments on, and linking of a working example to, an issue about code counting that he had become aware of. He also wanted to see if I could help get a copy of it working in his GitLab group.

During that collaborative time, I discovered that my example was not working because of some major code changes in SCC, and because it presumed the GitLab group being enumerated did not need the counting job to authenticate to prove it should have access to the projects. While we were collaborating, we fixed these problems and improved the solution to use the SCC binary rather than depend on a working Golang runtime. After our collaborative session, as I tweaked some more, I documented the parameters in README.md and debugged the ability to run it either with a group enumeration or with a provided list of specific Git repos.

So I owe big thanks to Torsten, and to GitLab's cultural support for remote-first water cooler conversations, for improving this working example to the point that it is worth sharing with a broader audience. If you'd like to know more, check out the GitLab handbook page: [Informal communication in an all-remote environment](/company/culture/all-remote/informal-communication/).

_Cover image by [NOAA](https://unsplash.com/@noaa?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/s/photos/lightning-fast?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)_

---

# Data-driven DevSecOps: Exploring GitLab Insights Dashboards

*Ricardo Amarilla Villalba · November 20, 2024 · Tags: CI/CD, DevSecOps platform, product, features, tutorial, solutions architecture*

Metrics and analytics play a crucial role in driving productivity, quality, and success. GitLab, as a comprehensive DevSecOps platform, offers powerful tools for tracking and visualizing these vital metrics through its Insights Dashboards. In this article, you'll learn how to use the Insights Dashboards in your environment.

## Introduction to GitLab metrics and analytics

GitLab provides an array of metrics and analytics tools that cover various aspects of the DevSecOps lifecycle:

1. [Productivity Analytics](https://docs.gitlab.com/ee/user/analytics/productivity_analytics.html): Track team velocity, cycle time, and lead time.
2. [Code Review Analytics](https://docs.gitlab.com/ee/user/analytics/code_review_analytics.html): Measure code quality, test coverage, and review efficiency.
3. [CI/CD Analytics](https://docs.gitlab.com/ee/user/analytics/ci_cd_analytics.html): Monitor pipeline performance and deployment frequency.
4. [Value Stream Analytics](https://docs.gitlab.com/ee/user/group/value_stream_analytics/): Visualize the flow of work from idea to production.
5. [Insights](https://docs.gitlab.com/ee/user/project/insights/): Explore and visualize data about your projects and groups.

These metrics offer invaluable insights into your development process, helping teams identify bottlenecks, optimize workflows, and make data-driven decisions.

## Leveraging labels for specific metrics

One of GitLab's most powerful yet understated features is labels, which allow you to filter and focus on specific metrics with pinpoint accuracy. By strategically applying labels to issues, merge requests, and epics, you can create custom views that provide targeted insights into your project's performance and progress.

Labels in GitLab act as versatile identifiers, allowing you to categorize and organize your work items with great flexibility. Whether you're tracking feature development, bug fixes, or team-specific tasks, labels enable you to slice and dice your project data in ways that reveal meaningful patterns and trends. This concept parallels the use of tags in cloud deployments, where resources are labeled for easier management, cost allocation, and operational insights.

By thoughtfully labeling your work items, you're essentially creating a sophisticated labeling system that can be leveraged to generate custom dashboards and reports. This approach empowers you to zoom in on the metrics that matter most to your team or stakeholders, providing a clear and focused view of your project's health and momentum.

## How to configure GitLab Insights

GitLab Insights allows you to explore and visualize data about your projects and groups. It provides valuable analytics on various aspects such as issues created and closed during a specified period, average time for merge requests to be merged, and triage hygiene. Insights can be configured for both projects and groups.

To configure Insights:

1. For project insights:
   * Create a file named `.gitlab/insights.yml` in the root directory of your project.
2. For group insights:
   * Create a `.gitlab/insights.yml` file in a project that belongs to your group.
   * Go to your group's **Settings > General**.
   * Expand the **Analytics** section and find the **Insights** section.
   * Select the project containing the configuration file and save your changes.

The `.gitlab/insights.yml` file is a YAML file where you define the structure and order of charts in a report, as well as the style of charts to be displayed. Each chart definition includes parameters such as title, description, type, and query to specify the data source and filtering conditions. A minimal example follows.
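As a small starting point before the fuller examples later in this post, a single-chart `.gitlab/insights.yml` might look like the following sketch. The report key and titles are arbitrary; the query parameters mirror the ones used in the larger configuration below.

```yaml
# .gitlab/insights.yml -- one report containing one chart
issuesDashboard:
  title: Issues dashboard
  charts:
    - title: Issues created per month
      description: Monthly created issues across the project
      type: bar
      query:
        data_source: issuables
        params:
          issuable_type: issue
          issuable_state: opened
          group_by: month
          period_limit: 12
```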
To view insights, navigate to **Analyze > Insights** in your project or group.

![View default Insights Dashboard](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097218/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750097217972.png)

## Customize merge request insights

While the default view provides valuable raw information, we can customize the Insights Dashboard to uncover additional layers of information, such as which team was responsible for each merge request and what type of problem each one solved.

## Merge request insights for each squad and requirement type

Measuring squad productivity in GitLab can be challenging, especially when the GitLab group and subgroup structure doesn't align perfectly with your squad organization. Here's how to overcome these challenges and effectively track squad productivity.

### Setting up squad-based metrics

1. **Label creation:** Create unique scoped labels for each squad (e.g., `squad::alpha`, `squad::beta`) and each requirement type (e.g., `type::bug`, `type::feature`, `type::maintenance`).

<figure class="video_container">
  <iframe src="https://www.youtube.com/embed/ZUOzORIUJeU?si=T8eHeGizS3blYFHB" frameborder="0" allowfullscreen="true"> </iframe>
</figure>

2. **Label application:** Consistently apply these squad labels to all issues and merge requests handled by each squad, regardless of the project or group they're in.

<figure class="video_container">
  <iframe src="https://www.youtube.com/embed/fJ9entEBZG8?si=MlM6mKirEdkmwDDJ" frameborder="0" allowfullscreen="true"> </iframe>
</figure>

   **Hints:**
   * Use the GitLab API to apply labels in bulk to existing open, merged, and closed MRs.
   * Add/remove/update labels as part of your GitLab CI pipeline.
   * Leverage the GitLab Triage Bot to automate the labeling process (see the policy sketch after the dashboards below).

3. **Dashboard setup:** Create a `.gitlab/insights.yml` file in your project repository with custom charts for team-specific and type-specific merge request insights.

```yaml
## Default Merge Requests insights.yml
mergeRequests:
  title: Merge requests dashboard
  charts:
    - title: Merge requests merged per week
      type: bar
      query:
        data_source: issuables
        params:
          issuable_type: merge_request
          issuable_state: merged
          group_by: week
          period_limit: 12
    - title: Merge requests merged per month
      type: bar
      query:
        data_source: issuables
        params:
          issuable_type: merge_request
          issuable_state: merged
          group_by: month
          period_limit: 3

## Per-teams Merge Requests insights.yml
mergeRequestsTeams:
  title: Merge requests dashboard per teams
  charts:
    - title: Merge requests merged per week
      type: stacked-bar
      query:
        data_source: issuables
        params:
          issuable_type: merge_request
          issuable_state: merged
          group_by: week
          period_limit: 12
          collection_labels:
            - squad::alpha
            - squad::beta
    - title: Merge requests merged per month
      type: stacked-bar
      query:
        data_source: issuables
        params:
          issuable_type: merge_request
          issuable_state: merged
          group_by: month
          period_limit: 3
          collection_labels:
            - squad::alpha
            - squad::beta

## Per-teams and Type Merge Requests insights.yml
mergeRequestsTeamsAndType:
  title: Per Teams and Type - Merge requests dashboard
  charts:
    - title: Merge requests merged per week - Squad Alpha
      type: stacked-bar
      query:
        data_source: issuables
        params:
          issuable_type: merge_request
          issuable_state: merged
          filter_labels: squad::alpha
          collection_labels:
            - type::feature
            - type::bug
            - type::maintenance
          group_by: week
          period_limit: 12
    - title: Merge requests merged per month - Squad Alpha
      type: stacked-bar
      query:
        data_source: issuables
        params:
          issuable_type: merge_request
          issuable_state: merged
          filter_labels: squad::alpha
          collection_labels:
            - type::feature
            - type::bug
            - type::maintenance
          group_by: month
          period_limit: 3
    - title: Merge requests merged per week - Squad Beta
      type: stacked-bar
      query:
        data_source: issuables
        params:
          issuable_type: merge_request
          issuable_state: merged
          filter_labels: squad::beta
          collection_labels:
            - type::feature
            - type::bug
            - type::maintenance
          group_by: week
          period_limit: 12
    - title: Merge requests merged per month - Squad Beta
      type: stacked-bar
      query:
        data_source: issuables
        params:
          issuable_type: merge_request
          issuable_state: merged
          filter_labels: squad::beta
          collection_labels:
            - type::feature
            - type::bug
            - type::maintenance
          group_by: month
          period_limit: 3
```

By implementing these customizations, you can create insightful dashboards that provide a clear view of merge request activity per team and requirement type, allowing you to visualize trends over time, compare performance between squads, and analyze the distribution of different types of work for each squad.

![dashboards with view of MR activity per team and requirement type](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097218/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750097217972.png)

![dashboard comparing performance between squads](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097218/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750097217974.png)
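These charts are only as good as the label hygiene feeding them. Picking up the Triage Bot hint from the setup list above, a policy file for the gitlab-triage gem could flag items that slipped through the labeling scheme. This is a sketch under stated assumptions: the fallback label is illustrative, and you should verify the condition and action keys against the gem's current documentation.

```yaml
# .triage-policies.yml -- processed by the gitlab-triage gem (illustrative)
resource_rules:
  merge_requests:
    rules:
      - name: Flag merged MRs that are missing a squad label
        conditions:
          state: merged
          forbidden_labels:
            - squad::alpha
            - squad::beta
        actions:
          labels:
            - needs-squad-label
```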
## Get started today

GitLab Insights is just the tip of the iceberg when it comes to metrics and analytics. To explore the full range of GitLab's powerful analytics features, including Value Stream Analytics, CI/CD Analytics, and Code Review metrics, check out our Value Stream Management product tour:

[![Value Stream Management product tour](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097218/Blog/Content%20Images/Blog/Content%20Images/Screenshot_2024-11-20_at_12.28.08_PM_aHR0cHM6_1750097217976.png)](https://gitlab.navattic.com/vsm)

> Ready to start your own metrics journey? Sign up for a [free 60-day trial of GitLab Ultimate today](https://gitlab.com/-/trials/new?glm_content=default-saas-trial&glm_source=about.gitlab.com%2F) and unlock the full potential of data-driven DevSecOps.

## Read more

- [Scheduled Reports Generation tool simplifies value stream management](https://about.gitlab.com/blog/new-scheduled-reports-generation-tool-simplifies-value-stream-management/)
- [Getting started with the new GitLab Value Streams Dashboard](https://about.gitlab.com/blog/getting-started-with-value-streams-dashboard/)
- [AI Impact analytics dashboard measures the ROI of AI](https://about.gitlab.com/blog/developing-gitlab-duo-ai-impact-analytics-dashboard-measures-the-roi-of-ai/)

---

# Deploy a NodeJS Express app with GitLab's Cloud Run integration

*Sarah Matthies, Noah Ing · January 13, 2025 · Tags: CI/CD, google, integrations, solutions architecture, tutorial*

Are you looking to deploy your NodeJS app to Google Cloud with the least maintenance possible? This tutorial will show you how to use GitLab's Google Cloud integration to deploy your NodeJS app in less than 10 minutes.

Traditionally, deploying an application often requires assistance from production or DevOps engineers. This integration now empowers developers to handle deployments independently.
Whether you're a solo developer or part of a large team, this setup gives everyone the ability to deploy their applications efficiently.

## Overview

- Create a new project in GitLab
- Set up your NodeJS application
- Use the Google Cloud integration to create a service account
- Use the Google Cloud integration to configure Cloud Run via merge request
- Enjoy your newly deployed NodeJS app
- Follow the cleanup guide

## Prerequisites

- Owner access on a Google Cloud Platform project
- Working knowledge of JavaScript/TypeScript (not playing favorites here!)
- Working knowledge of GitLab CI
- 10 minutes

## Step-by-step guide

### 1. Create a new project in GitLab

We decided to call our project `nodejs-express-cloud-run` for simplicity.

![Create a new project](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097905/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750097905106.png)

### 2. Upload your NodeJS app, or use this example to get started.

[Demo](https://gitlab.com/demos/templates/nodejs-cloud-run)

**Note:** Make sure to include the `cloud-run` [CI template](https://gitlab.com/gitlab-org/incubation-engineering/five-minute-production/library/-/raw/main/gcp/cloud-run.gitlab-ci.yml) within your project, as shown in the sketch below.

![cloud-run CI template include](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097905/Blog/Content%20Images/Blog/Content%20Images/image6_aHR0cHM6_1750097905107.png)
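If you start from your own repository rather than the demo, the include from the note above would look something like this in your `.gitlab-ci.yml` (the URL is the template location given in the note):

```yaml
# .gitlab-ci.yml -- pulls in the cloud-run template referenced above
include:
  - remote: 'https://gitlab.com/gitlab-org/incubation-engineering/five-minute-production/library/-/raw/main/gcp/cloud-run.gitlab-ci.yml'
```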
### 5. Voila! Check your pipeline and you will see you have successfully deployed to Google Cloud Run using GitLab CI.\n\n![Successful deployment to Google Cloud Run](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097905/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750097905119.png)\n\nClick the Service URL to view your newly deployed Node server.\n\n![View newly deployed Node server](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097905/Blog/Content%20Images/Blog/Content%20Images/image7_aHR0cHM6_1750097905120.png)\n\nIn addition, you can navigate to __Operate > Environments__ to see a list of deployments for your environments.\n\n![Environments view of deployment list](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097905/Blog/Content%20Images/Blog/Content%20Images/image12_aHR0cHM6_1750097905121.png)\n\nBy clicking on the environment called `main`, you’ll be able to view a complete list of deployments specific to that environment.\n\n![Main view of deployments to specific environment](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097905/Blog/Content%20Images/Blog/Content%20Images/image9_aHR0cHM6_1750097905122.png)\n\n### 6. Next steps\n\nTo get started with developing your Node application, try adding another endpoint. For instance, in your `index.js` file, you can add a **/bye** endpoint as shown below:\n\n```\napp.get('/bye', (req, res) => {\n  res.send(`Have a great day! See you!`);\n});\n```\n\nPush the changes to the repo, and watch the `deploy-to-cloud-run` job deploy the updates. Once it’s complete, go back to the Service URL and navigate to the **/bye** endpoint to see the new functionality in action.\n\n![Bye message](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097905/Blog/Content%20Images/Blog/Content%20Images/image11_aHR0cHM6_1750097905123.png)\n\n## Follow the cleanup guide\n\nTo prevent incurring charges on your Google Cloud account for the resources used in this tutorial, you can either delete the specific resources or delete the entire Google Cloud project. 
For detailed instructions, refer to the [cleanup guide here](https://docs.gitlab.com/ee/tutorials/create_and_deploy_web_service_with_google_cloud_run_component/#clean-up).\n\n> Read more of these helpful [tutorials from GitLab solutions architects](https://about.gitlab.com/blog/tags/solutions-architecture/).\n",[109,751,231,9,684],"google",{"slug":753,"featured":91,"template":688},"deploy-a-nodejs-express-app-with-gitlabs-cloud-run-integration","content:en-us:blog:deploy-a-nodejs-express-app-with-gitlabs-cloud-run-integration.yml","Deploy A Nodejs Express App With Gitlabs Cloud Run Integration","en-us/blog/deploy-a-nodejs-express-app-with-gitlabs-cloud-run-integration.yml","en-us/blog/deploy-a-nodejs-express-app-with-gitlabs-cloud-run-integration",{"_path":759,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":760,"content":766,"config":773,"_id":775,"_type":14,"title":776,"_source":16,"_file":777,"_stem":778,"_extension":19},"/en-us/blog/deploy-a-server-using-go-with-gitlab-google-cloud",{"title":761,"description":762,"ogTitle":761,"ogDescription":762,"noIndex":6,"ogImage":763,"ogUrl":764,"ogSiteName":673,"ogType":674,"canonicalUrls":764,"schema":765},"Deploy a server using Go with GitLab + Google Cloud","This tutorial shows how to use GitLab’s Google Cloud integration to deploy a Golang server in less than 10 minutes, helping developers become more independent and efficient.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098028/Blog/Hero%20Images/Blog/Hero%20Images/blog-image-template-1800x945_fJKX41PJHKCfSOWw4xQxm_1750098028126.png","https://about.gitlab.com/blog/deploy-a-server-using-go-with-gitlab-google-cloud","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Deploy a server using Go with GitLab + Google Cloud\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Claire Champernowne\"},{\"@type\":\"Person\",\"name\":\"Noah Ing\"}],\n        \"datePublished\": \"2025-01-28\",\n      }",{"title":761,"description":762,"authors":767,"heroImage":763,"date":769,"body":770,"category":685,"tags":771},[768,747],"Claire Champernowne","2025-01-28","Deploying an application to the cloud often requires assistance from production or DevOps engineers. GitLab's Google Cloud integration empowers developers to handle deployments independently. In this tutorial, you'll learn how to deploy a server to Google Cloud in less than 10 minutes using Go. Whether you’re a solo developer or part of a large team, this setup allows you to deploy applications efficiently.\n\n## You'll learn how to:\n\n1. Create a new project in GitLab\n2. Create a Go server utilizing `main.go`\n3. Use the Google Cloud integration to create a Service account\n4. Use the Google Cloud integration to create Cloud Run via a merge request\n5. Access your newly deployed Go server\n6. Clean up your environment\n\n## Prerequisites\n\n- Owner access on a Google Cloud Platform project\n- Working knowledge of Golang\n- Working knowledge of GitLab CI\n- 10 minutes\n\n## Step-by-step Golang server deployment to Google Cloud\n\n### 1. Create a new blank project in GitLab.\n\nWe decided to call our project `golang-cloud-run` for simplicity.\n\n![Create a new blank project in GitLab](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098035/Blog/Content%20Images/Blog/Content%20Images/image9_aHR0cHM6_1750098035249.png)\n\n### 2. 
Create a server utilizing this `main.go` demo.\n\nFind the `main.go` demo [here](https://gitlab.com/demos/applications/golang-cloud-run).\n\n```\n// Sample run-helloworld is a minimal Cloud Run service.\npackage main\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"net/http\"\n\t\"os\"\n)\n\nfunc main() {\n\tlog.Print(\"starting server...\")\n\thttp.HandleFunc(\"/\", handler)\n\n\t// Determine port for HTTP service.\n\tport := os.Getenv(\"PORT\")\n\tif port == \"\" {\n\t\tport = \"8080\"\n\t\tlog.Printf(\"defaulting to port %s\", port)\n\t}\n\n\t// Start HTTP server.\n\tlog.Printf(\"listening on port %s\", port)\n\tif err := http.ListenAndServe(\":\"+port, nil); err != nil {\n\t\tlog.Fatal(err)\n\t}\n}\n\nfunc handler(w http.ResponseWriter, r *http.Request) {\n\tname := os.Getenv(\"NAME\")\n\tif name == \"\" {\n\t\tname = \"World\"\n\t}\n\tfmt.Fprintf(w, \"Hello %s!\\n\", name)\n}\n```\n\n### 3. Use the Google Cloud integration to create a Service account.\n\nNavigate to **Operate \\> Google Cloud \\> Create Service account**.\n\n![Golang tutorial - image 2](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098036/Blog/Content%20Images/Blog/Content%20Images/image11_aHR0cHM6_1750098035250.png)\n\n### 4. Configure the region you would like the Cloud Run instance deployed to.\n\n![Golang tutorial - image10](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098035/Blog/Content%20Images/Blog/Content%20Images/image10_aHR0cHM6_1750098035252.png)\n\n### 5. Use the Google Cloud integration to configure Cloud Run via Merge Request.\n\n![Golang tutorial - image4](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098035/Blog/Content%20Images/Blog/Content%20Images/image4_aHR0cHM6_1750098035254.png)\n\n### 6. This will open a merge request. Immediately merge the MR.\n\n![Golang tutorial - image6](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098036/Blog/Content%20Images/Blog/Content%20Images/image6_aHR0cHM6_1750098035257.png)\n\nThis merge request adds a CI/CD deployment job to your pipeline definition. In our case, this also creates the pipeline definition, as we didn’t have one before.\n\n**Note:** The CI/CD variables `GCP_PROJECT_ID`, `GCP_REGION`, `GCP_SERVICE_ACCOUNT`, and `GCP_SERVICE_ACCOUNT_KEY` will all be automatically populated from the previous steps. \n\n![Golang tutorial - image7](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098035/Blog/Content%20Images/Blog/Content%20Images/image7_aHR0cHM6_1750098035259.png)\n\n### 7. Voila! Check your pipeline and you will see you have successfully deployed to Google Cloud Run utilizing GitLab CI.\n\n![Golang tutorial - image2](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098035/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750098035261.png)\n\n\u003Cbr>\n\n![Golang tutorial - image3](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098035/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750098035262.png)\n\n
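The merge request from step 6 generated this pipeline definition for you, so there is nothing to write by hand. For the curious, a minimal hand-written equivalent might look like the sketch below - this assumes the same `cloud-run` CI template used in our companion NodeJS tutorial, and the generated file may differ in its details.\n\n```yaml\n# Hedged sketch of a pipeline that defers entirely to the cloud-run CI template.\n# The GCP_* variables are populated by the Google Cloud integration (see the note in step 6).\ninclude:\n  - remote: 'https://gitlab.com/gitlab-org/incubation-engineering/five-minute-production/library/-/raw/main/gcp/cloud-run.gitlab-ci.yml'\n```\n\n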
### 8. Click the Service URL to view your newly deployed server.\n\nAlternatively, you can navigate to **Operate \\> Environments** to see a list of deployments for your environments.\n\n![Golang tutorial - image5](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098035/Blog/Content%20Images/Blog/Content%20Images/image5_aHR0cHM6_1750098035264.png)\n\nBy clicking on the environment called **main**, you’ll be able to view a complete list of deployments specific to that environment.\n\n![Golang tutorial - image8](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098035/Blog/Content%20Images/Blog/Content%20Images/image8_aHR0cHM6_1750098035265.png)\n\n## Next steps\n\nTo get started with developing your Go application, try adding another endpoint. For instance, in your `main.go` file, you can add a `/bye` endpoint as shown below (don’t forget to register the new handler function in main!):\n\n```\nfunc main() {\n\tlog.Print(\"starting server...\")\n\n\thttp.HandleFunc(\"/\", handler)\n\thttp.HandleFunc(\"/bye\", byeHandler)\n```\n\n```\nfunc byeHandler(w http.ResponseWriter, r *http.Request) {\n\tname := os.Getenv(\"NAME\")\n\tif name == \"\" {\n\t\tname = \"World\"\n\t}\n\tfmt.Fprintf(w, \"Bye %s!\\n\", name)\n}\n```\n\nYour `main.go` file should now look something like this:\n\n```\n// Sample run-helloworld is a minimal Cloud Run service.\npackage main\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"net/http\"\n\t\"os\"\n)\n\nfunc main() {\n\tlog.Print(\"starting server...\")\n\n\thttp.HandleFunc(\"/\", handler)\n\n\thttp.HandleFunc(\"/bye\", byeHandler)\n\n\t// Determine port for HTTP service.\n\tport := os.Getenv(\"PORT\")\n\tif port == \"\" {\n\t\tport = \"8080\"\n\t\tlog.Printf(\"defaulting to port %s\", port)\n\t}\n\n\t// Start HTTP server.\n\tlog.Printf(\"listening on port %s\", port)\n\tif err := http.ListenAndServe(\":\"+port, nil); err != nil {\n\t\tlog.Fatal(err)\n\t}\n}\n\nfunc handler(w http.ResponseWriter, r *http.Request) {\n\tname := os.Getenv(\"NAME\")\n\tif name == \"\" {\n\t\tname = \"World\"\n\t}\n\tfmt.Fprintf(w, \"Hello %s!\\n\", name)\n}\n\nfunc byeHandler(w http.ResponseWriter, r *http.Request) {\n\tname := os.Getenv(\"NAME\")\n\tif name == \"\" {\n\t\tname = \"World\"\n\t}\n\tfmt.Fprintf(w, \"Bye %s!\\n\", name)\n}\n```\n\nPush the changes to the repo, and watch the `deploy-to-cloud-run` job deploy the updates. Once it’s complete, go back to the Service URL and navigate to the `/bye` endpoint to see the new functionality in action.\n\n## Clean up the environment\n\nTo prevent incurring charges on your Google Cloud account for the resources used in this tutorial, you can either delete the specific resources or delete the entire Google Cloud project. 
For detailed instructions, refer to the [cleanup guide](https://docs.gitlab.com/ee/tutorials/create_and_deploy_web_service_with_google_cloud_run_component/#clean-up).\n\n> Discover more tutorials like this in our [Solutions Architecture](https://about.gitlab.com/blog/tags/solutions-architecture/) area.\n",[772,482,684,9,685,729],"DevSecOps",{"slug":774,"featured":6,"template":688},"deploy-a-server-using-go-with-gitlab-google-cloud","content:en-us:blog:deploy-a-server-using-go-with-gitlab-google-cloud.yml","Deploy A Server Using Go With Gitlab Google Cloud","en-us/blog/deploy-a-server-using-go-with-gitlab-google-cloud.yml","en-us/blog/deploy-a-server-using-go-with-gitlab-google-cloud",{"_path":780,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":781,"content":787,"config":793,"_id":795,"_type":14,"title":796,"_source":16,"_file":797,"_stem":798,"_extension":19},"/en-us/blog/eks-fargate-runner",{"title":782,"description":783,"ogTitle":782,"ogDescription":783,"noIndex":6,"ogImage":784,"ogUrl":785,"ogSiteName":673,"ogType":674,"canonicalUrls":785,"schema":786},"Setting up GitLab EKS Fargate Runners in just one hour","This detailed tutorial answers the question of how to leverage Amazon's AWS Fargate container technology for GitLab Runners.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663373/Blog/Hero%20Images/jeremy-lapak-CVvFVQ_-oUg-700unsplash.jpg","https://about.gitlab.com/blog/eks-fargate-runner","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Get started with GitLab EKS Fargate Runners in 1 hour and zero code, Iteration 1\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Darwin Sanoy\"}],\n        \"datePublished\": \"2023-05-24\",\n      }",{"title":788,"description":783,"authors":789,"heroImage":784,"date":790,"body":791,"category":681,"tags":792},"Get started with GitLab EKS Fargate Runners in 1 hour and zero code, Iteration 1",[703],"2023-05-24","\nLeveraging Amazon's AWS Fargate container technology for [GitLab Runners](https://docs.gitlab.com/runner/) has been a longstanding ask from our customers. This tutorial gets you up and running with the GitLab EKS Fargate Runner combo in less than an hour.\n\nGitLab has a pattern for this task for [Fargate](https://docs.aws.amazon.com/AmazonECS/latest/userguide/what-is-fargate.html) runners under AWS Elastic Container Service (ECS). The primary challenge with this solution is that AWS ECS itself does not allow for the overriding of what image is used when calling an ECS task. Therefore, each GitLab Runner manager ignores the gitlab-ci.yml `image:` tag and runs on the image preconfigured in the task during deployment of the runner manager. As a result, you'll end up creating runner container images that contain every dependency for all the software built by the runner, or you'll create a lot of runner managers per image — or both.\n\nI have long wondered if Fargate-backed Elastic Kubernetes Service (EKS) could get around this limitation since, by nature, Kubernetes must be able to run any image given to it.\n\n## The approach\n\nNothing takes the joy out of learning faster than a lot of complex setup before being able to get to the point of the exercise. To address this, this tutorial uses four things to dramatically reduce the time and steps required to get from zero to hero.\n\n1. AWS CloudShell to minimize the EKS Admin Tooling setup. 
This also leaves your local machine environment untouched so that other tooling configurations don't get modified.\n2. A project called **AWS CloudShell “Run From Web” Configuration Scripts** to rapidly add additional tooling to CloudShell. This includes some hacks to get large Terraform templates to work on AWS CloudShell.\n3. EKS Blueprints — specifically, a Terraform example that implements both the [Karpenter autoscaler](https://aws.amazon.com/blogs/aws/introducing-karpenter-an-open-source-high-performance-kubernetes-cluster-autoscaler/) and Fargate, including for the kube-system namespace.\n4. A simple Helm install for GitLab Runner.\n\nAlthough you will be running CLI commands and editing config files, no coding is required in the sense that you won't have to build something complex from scratch and then maintain it yourself.\n\n## The results\n\nIt works! It can run 2 x 200 (max allowed per job) parallel “Hello, World” jobs on AWS Fargate-backed EKS in about 4 minutes, which demonstrates effectively unlimited scalability. It can also run a simple Auto DevOps pipeline, which proves out the ability to run a bunch of different containers.\n\nThe fact that the entire cluster - including kube-system - is Fargate-backed reduces the Kubernetes-specific, long-term SRE work to a level approaching that of ECS Fargate clusters. Later on, we discuss the cost of this trade-off and how it can be reconfigured.\n\n## What makes it possible: Product-managed IaC that is an extensible framework\n\nTooling made up of Infrastructure as Code (IaC) is frequently referred to as “templates,” and these templates have a reputation of not aging well because there is no active stewardship of the codebase — they are thought of as a one-and-done effort. However, this term does not reflect reality well when the underlying IaC code is actually being product-managed. You can tell if something is being product-managed by these markers:\n\n- It has a scope-bounded vision of what it wants to do for the community being served (customer).\n- It has active stewardship that keeps the codebase moving along, even if it is open source.\n- It seeks to incorporate strategic enhancements, a.k.a. new features.\n- Things that are broken are considered bugs and are actively eliminated.\n- There is a cadence for taking underlying version updates and for supporting new versions of the primary things they deploy.\n\nAs an extensible framework, EKS Blueprints:\n\n- Are purposefully architected to be extended by anyone.\n- Already have many extensions built.\n\nWhen you come upon a new need while implementing with EKS Blueprints, it is important to check whether EKS Blueprints already handles that consideration - similar to how you would look for Ruby Gems, NPM modules, or Python PyPI packages before building functionality from scratch.\n\nAll of the above are aspects of how the AWS EKS team is product-managing EKS Blueprints. They deserve a big round of applause because product-managing anything to prevent it from becoming yet another community-maintained shelfware project is a strong commitment that requires tenacity!\n\n## Reproducing the experiment\n\n### 1. Set up AWS CloudShell\n\n> **Note:** If you already have a fully persistent environment setup (like your laptop) with the AWS CLI, kubectl, and Terraform, you can avoid environment rebuilds when AWS CloudShell times out by using that instead.\n\nAWS CloudShell comes with kubectl, Git, and AWS CLI, which are all needed. However, we also need a few other scripts. 
More information about these scripts can be read in [my blog post on AWS CloudShell “Run From Web” Configuration Scripts](https://missionimpossiblecode.io/aws-cloudshell-run-from-web-configuration-scripts).\n\n> **Note:** The steps in this section up through the `git clone` from GitLab step (second clone operation) in the next section can be accomplished by running this: `s=prep-eksblueprint-karpenter.sh ; curl -sSL https://gitlab.com/guided-explorations/aws/aws-cloudshell-configs/-/raw/main/${s} -o /tmp/${s}; chmod +x /tmp/${s}; bash /tmp/${s}` .\n\n1. Use the web console to log in to an AWS account where you have admin permissions.\n2. Switch to the region of your choosing.\n3. In the bottom left of the console, click the “CloudShell” icon.\n4. Copy and paste the following one-liner into the console to install Helm, Terraform, and the Nano text editor:\n   `curl -sSL https://gitlab.com/guided-explorations/aws/aws-cloudshell-configs/-/raw/main/add-all.sh -o $HOME/add-all.sh; chmod +x $HOME/add-all.sh; bash $HOME/add-all.sh`\n5. Since our Terraform template will grow larger than the 1GB limit of space in the $HOME directory, we need a workaround: use the template in one directory, but store the Terraform state in $HOME, where it will be kept for as long as 120 days. The following one-liner triggers a script that performs that setup for us, after which we can use the /terraform directory for our template:\n   `curl -sSL https://gitlab.com/guided-explorations/aws/aws-cloudshell-configs/-/raw/main/prep-for-terraform.sh -o $HOME/prep-for-terraform.sh; chmod +x $HOME/prep-for-terraform.sh; bash $HOME/prep-for-terraform.sh`\n\n### 2. Run Terraform EKS Blueprint\n\n> **Note:** If at any time you leave your AWS CloudShell long enough for your session to end, the /terraform directory will be tossed. Simply run the last script above and the first four steps below to make it operable again. This will most likely be necessary when it comes time to tear down the Terraform-created AWS resources.\n>\n> Sometimes your AWS CloudShell credentials may expire with a message like: `Error: Kubernetes cluster unreachable: Get \"<CLUSTER URL>\": getting credentials: exec: executable aws failed with exit code 255`. Simply refresh the entire browser tab where AWS CloudShell is running and you’ll generally have new credentials.\n\n#### Version safety\n\nThis tutorial uses a specific release of the EKS Blueprint project so that you have a known state as of the time of publishing. The project version also cascades into the versions of all the many dependent modules. While it may also work with the latest version, the version at the time of writing was Version 4.29.0.\n\nThis tutorial also uses Terraform binary Version 1.4.5.\n\n#### Procedures\n\nIf, while using AWS CloudShell, you experience this error: `Error: configuring Terraform AWS Provider: no valid credential sources for Terraform AWS Provider found`, you will need to refresh your browser to update the cached credentials in the terminal session.\n\nPerform the following commands in the AWS CloudShell session:\n\n1. `git clone https://github.com/aws-ia/terraform-aws-eks-blueprints.git --no-checkout /terraform/terraform-aws-eks-blueprints` \n2. `cd /terraform/terraform-aws-eks-blueprints/`\n3. `git reset --hard tags/v4.29.0` #Version pegging to the code that this article was authored with.\n4. 
`git clone https://gitlab.com/guided-explorations/aws/eks-runner-configs/gitlab-runner-eks-fargate.git /terraform/terraform-aws-eks-blueprints/examples/glrunner`\n\n   **Note:** Like other EKS Blueprints examples, the GitLab EKS Fargate Runner example references EKS Blueprint modules with a relative directory reference. This is why we are cloning it into a subdirectory of the EKS Blueprints project.\n5. `cd /terraform/terraform-aws-eks-blueprints/examples/glrunner`\n6. `terraform init`\n\n   **Important**: If you are using AWS CloudShell and your session times out, the /terraform folder and the installed utilities will be gone. You would have to reproduce the above steps to get the Terraform template into a usable state again. This is most likely to happen when you go to use Terraform to delete the stack after playing with it for some days.\n\n   The next few instructions are from: **https://github.com/aws-ia/terraform-aws-eks-blueprints/blob/main/examples/karpenter/README.md#user-content-deploy**. Note that the `-state` switch ensures our state is in persistent storage.\n7. `terraform apply -target module.vpc -state=$HOME/tfstate/runner.tfstate`\n8. `terraform apply -target module.eks -state=$HOME/tfstate/runner.tfstate`\n9. **Note:** If you receive the error “The configmap \"aws-auth\" does not exist”, re-run the same command - it will usually update successfully.\n10. `terraform apply -state=$HOME/tfstate/runner.tfstate`\n\nThe previous command will output a kubeconfig command that needs to be run to ensure subsequent kubectl commands work. Run that command. If you are in AWS CloudShell and did not copy the command, this command should work and map to the correct region:\n    `aws eks update-kubeconfig --region $AWS_DEFAULT_REGION --name \"glrunner\"`\n\nIf everything was done correctly, you will have an EKS cluster named `glrunner` in the web console for the CloudShell region, like this:\n\n![EKS cluster in the web console](https://about.gitlab.com/images/blogimages/eks-fargate-runner/eksclusterinconsole.png)  \n\nAnd the output of the console command `kubectl get pods -A` will look like this:\n\n![kubectl get pods output for the plain cluster](https://about.gitlab.com/images/blogimages/eks-fargate-runner/cliplaincluster.png)\n\nThe output of the console command `kubectl get nodes -A` will show the Fargate prefix:\n\n![kubectl get nodes output showing Fargate nodes](https://about.gitlab.com/images/blogimages/eks-fargate-runner/clinodesarefargate.png)\n\n> **Note:** Notice that all the EKS extras (coredns, ebs-cni, and Karpenter itself) are also running on Fargate. If you are willing to tolerate some regular Kubernetes nodes, you may be able to save cost by running always-on pods on regular Kubernetes hosts. Since this cluster runs Karpenter, you will not need to manually scale those hosts, and EKS makes control plane and node updates easier.\n\n### 3. Install GitLab Runner\n\nThese and other commands are available in the GitLab documentation for the [GitLab Runner Helm Chart](https://docs.gitlab.com/runner/install/kubernetes.html#additional-configuration).\n\n1. Create an empty GitLab project.\n2. Retrieve a GitLab Runner Token from the project. Keep in mind that using a project token is the easiest way to ensure your experiment runs only on the EKS Fargate Runner. Using a group token may cause your job to run on other runners already set up at your company. You can follow [“Obtain a token”](https://docs.gitlab.com/runner/register/#requirements) from the documentation if you need to.\n3. Perform the following commands back in the AWS CloudShell session.\n4. 
`nano runnerregistration.yaml`\n5. Paste the following:\n\n   ```yaml\n   gitlabUrl: https://_YOUR_GITLAB_URL_HERE_.com\n   runnerRegistrationToken: _YOUR_GITLAB_RUNNER_TOKEN_HERE_\n   concurrent: 200\n   rbac:\n     create: true\n   runners:\n     tags: eks-fargate\n     runUntagged: true\n     imagePullPolicy: if-not-present\n   envVars:\n     - name: KUBERNETES_POLL_TIMEOUT\n       value: 90\n   ```\n\n   **Note:** Many more settings are discussed in the documentation for the [Kubernetes Executor](https://docs.gitlab.com/runner/executors/kubernetes.html). \n\n**Hard Lesson:** Using a setting for `concurrent` that is lower than the `parallel` setting in the GitLab job below results in all kinds of failures, due to some job pods having to wait for an execution slot. Since it’s Fargate, there are no savings in keeping it lower and no negative impact in making it match the full `parallel` amount.\n\n6. Replace \_YOUR_GITLAB_URL_HERE_ with your actual GitLab URL.\n7. Replace \_YOUR_GITLAB_RUNNER_TOKEN_HERE_ with your actual runner token.\n8. Press CTRL-X to exit and press Y at the save prompt.\n9. `helm repo add gitlab https://charts.gitlab.io`\n10. `helm repo update gitlab`\n11. `helm install --namespace gitlab-runner --create-namespace runner1 -f runnerregistration.yaml gitlab/gitlab-runner`\n12. Wait for a few minutes and check the project’s list of runners for a new one with the tag `eks-fargate`.\n\nIn AWS CloudShell, the command `kubectl get pods -n gitlab-runner` should produce output similar to this:\n\n![Runner pods in the gitlab-runner namespace](https://about.gitlab.com/images/blogimages/eks-fargate-runner/runnerlist.png)\n\nAnd in the GitLab Runner list, it will look similar to this:\n\n![New runner in the GitLab runner list](https://about.gitlab.com/images/blogimages/eks-fargate-runner/glrunnerlist.png)\n\n### 4. Run a test job\n\nThe simplest way to test GitLab Runner scaling is using the `parallel:` keyword to schedule multiple copies of a job. It can also be used to create a job matrix where not all jobs do the same thing (see the sketch at the end of this section).\n\nOne or more GitLab Runner Helm deployments can live in any namespace, so you have many-to-many mapping flexibility for how you think of runners and their Kubernetes context.\n\nIn the GitLab project where you created the runner, use the web IDE to create `.gitlab-ci.yml` and populate it with the following content:\n   ```yaml\n   parallel-fargate-hello-world:\n     image: public.ecr.aws/docker/library/bash\n     stage: build\n     parallel: 200\n     script:\n       - echo \"Hello Fargate World\"\n   ```\n\n**Hard Lesson:** After hitting the Docker Hub image pull rate limit, I shifted to the same container in the AWS Public Elastic Container Registry (ECR), which has an [image pull rate limit](https://docs.aws.amazon.com/AmazonECR/latest/public/public-service-quotas.html) of 10 per second for this scenario.\n\nIf the job does not automatically start, use the pipeline page to force it to run.\n\nIf everything is configured correctly, your final pipeline status panel should look something like this:\n\n![Completed parallel jobs in the pipeline status panel](https://about.gitlab.com/images/blogimages/eks-fargate-runner/completedjobs.png)\n\n
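As promised above, here is what the matrix form might look like - a hedged sketch that is not part of the scaling experiment; the job name and variables are purely illustrative. This would schedule four jobs, one per variable combination:\n\n```yaml\n# Hypothetical matrix variant of the test job: 2 x 2 = 4 jobs, each with\n# its own GREETING/TARGET combination injected as CI/CD variables.\nmatrix-fargate-hello:\n  image: public.ecr.aws/docker/library/bash\n  stage: build\n  parallel:\n    matrix:\n      - GREETING: [\"Hello\", \"Goodbye\"]\n        TARGET: [\"Fargate\", \"EKS\"]\n  script:\n    - echo \"$GREETING, $TARGET World\"\n```\n\n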
### 5. Runner scaling experimentation\n\nThese and other commands are available in the GitLab documentation for the [GitLab Runner Helm Chart](https://docs.gitlab.com/runner/install/kubernetes.html#additional-configuration).\n\nAdditional runners can be added by re-running the install command with a different name for the runner (if using the same token, you’ll have two runners in the same group or project):\n\n`helm install --namespace gitlab-runner runner2 -f runnerregistration.yaml gitlab/gitlab-runner`\n\nRunning 200 jobs takes just under 2 minutes.\n\n#### 400 parallel jobs\n\nBy setting up a second identical job (with a unique job name), I was able to process 400 total jobs.\n\n**Hard Lesson:** The runner likes to schedule all jobs of a `parallel:` job on the same runner instance. It does not seem to want to split a large job across multiple runners registered in the same project. So in order to get more than 200 jobs to process, I had to have two registered runners set to `concurrent: 200` and two separate jobs set to `parallel: 200`.\n\nRunning 400 jobs takes just over 3 minutes.\n\n#### More than 400 parallel jobs\n\nAs I tried to scale higher, jobs started to hang. I tried specifically routing jobs to five runners, each capable of 300 parallel jobs. I also tried multiple stages and used a hack of `needs: []` to get simultaneous execution of jobs in multiple stages.\n\nI was not successful and there could be a wide variety of reasons why — a riddle for a future iteration.\n\nThis command can be used to update a runner's settings after editing the Helm values file (including the token, to move the runner to another context): \n\n`helm upgrade --namespace gitlab-runner -f runnerregistration.yaml runner2 gitlab/gitlab-runner`\n\nI found that when I pushed the limits, I would sometimes end up with hung pods until I understood what needed adjusting. Leaving hung Fargate pods around will add up to a lot of cash because the pricing assumes very short execution times. This command helps you terminate job pods without accidentally terminating the runner manager pods:\n\n`kubectl get pods --all-namespaces --no-headers |  awk '{if ($2 ~ \"_YOUR_JOB_POD_PREFACE_*\") print $2}' | xargs kubectl -n _YOUR_RUNNER_NAMESPACE_ delete pod`\n\nDon't forget to replace \_YOUR_RUNNER_NAMESPACE_ and \_YOUR_JOB_POD_PREFACE_. \_YOUR_JOB_POD_PREFACE\_ is the unique prefix of only the job pods from a given runner, followed by the wildcard star character (\*).\n\nTo uninstall a runner, use:\n\n`helm delete --namespace gitlab-runner runner1`\n\n#### Testing Auto DevOps to prove the `image:` tag is honored\n\nTechnically, testing Auto DevOps to prove the `image:` tag is honored isn’t entirely necessary, since the above job already loads the bash container without that container being specified in any of the runner or infrastructure setup. However, I performed this as a litmus test anyway.\n\nFollow these steps:\n\n1. Create a new project by clicking the “+” sign in the top bar of GitLab.\n2. On the next page, select “New Project/Repository”.\n3. Then “Create from template”.\n4. Select “Ruby on Rails” (first choice).\n5. Once the project creation is complete, register an EKS runner to it (or re-register the existing runner to the new project).\n6. In the project, select “Settings (Gear Icon)” => “CI/CD” => Auto DevOps => Default to Auto DevOps pipeline.\n7. Click “Save changes”.\n\nThe Auto DevOps pipeline should run. 
If you don’t have a cluster wired up, it will mainly do security scanning, which is sufficient to prove that arbitrary containers can be used by the Fargate-backed GitLab Runner.\n\n### 6. Solution tuning via an extensible platform\n\nEKS Blueprints is not only product-managed, it is also an extensible platform or framework. In the spirit of fully leveraging the extensible, product-managed EKS Blueprints project, you will always want to check if Blueprints is already instrumented for your scenario before writing code. Additionally, if you must write code, you can consider contributing it as an EKS Blueprint extension so the community can take on some responsibility for maintaining it.\n\n1. The EKS Blueprints managed IaC has a dizzying number of tuning parameters and optional extensions. For instance, if you want the full GitLab Runner logs collected to AWS CloudWatch, it is a simple configuration change to add the fluentd log agent to push custom logs to CloudWatch.\n2. Using Fargate for always-on containers is a trade-off of compute costs to get rid of Kubernetes node management overhead. This trade-off can be easily reversed in this example by removing \"kube-system\" from \"fargate_profiles\" - since Karpenter is also installed and configured, the hosts will autoscale for load.\n\n### 7. Teardown\n\nThe next few instructions are from https://github.com/aws-ia/terraform-aws-eks-blueprints/blob/main/examples/karpenter/README.md#user-content-destroy.\n\nIf you are using AWS CloudShell and the /terraform directory no longer exists, perform these steps to re-prepare AWS CloudShell to perform teardown.\n\nIf you are not using AWS CloudShell, skip forward to “Teardown steps”.\n\n1. `curl -sSL https://gitlab.com/guided-explorations/aws/aws-cloudshell-configs/-/raw/main/add-all.sh -o $HOME/add-all.sh; chmod +x $HOME/add-all.sh; bash $HOME/add-all.sh`\n2. `curl -sSL https://gitlab.com/guided-explorations/aws/aws-cloudshell-configs/-/raw/main/prep-for-terraform.sh -o $HOME/prep-for-terraform.sh; chmod +x $HOME/prep-for-terraform.sh; bash $HOME/prep-for-terraform.sh`\n3. `git clone https://github.com/aws-ia/terraform-aws-eks-blueprints.git --no-checkout /terraform/terraform-aws-eks-blueprints` \n4. `cd /terraform/terraform-aws-eks-blueprints/`\n5. `git reset --hard tags/v4.29.0`\n6. `git clone https://gitlab.com/guided-explorations/aws/eks-runner-configs/gitlab-runner-eks-fargate.git /terraform/terraform-aws-eks-blueprints/examples/glrunner`\n\n   > **Note:** The above steps can be accomplished by running this: `s=prep-eksblueprint-karpenter.sh ; curl -sSL https://gitlab.com/guided-explorations/aws/aws-cloudshell-configs/-/raw/main/${s} -o /tmp/${s}; chmod +x /tmp/${s}; bash /tmp/${s}` .\n\n7. `cd /terraform/terraform-aws-eks-blueprints/examples/glrunner`\n8. `terraform init`\n\nFollow these teardown steps:\n\n1. `helm delete --namespace gitlab-runner runner1`\n2. `helm delete --namespace gitlab-runner runner2`\n3. `terraform destroy -target=\"module.eks_blueprints_kubernetes_addons\" -auto-approve -state=$HOME/tfstate/runner.tfstate`\n4. `terraform destroy -target=\"module.eks\" -auto-approve -state=$HOME/tfstate/runner.tfstate`\n5. **Note:** If you receive an error about refreshing cached credentials, simply re-run the command and it will usually update successfully.\n6. 
`terraform destroy -auto-approve -state=$HOME/tfstate/runner.tfstate`\n\n### Iteration _n_: We would love your input\n\nThis blog is \"Iteration 1\" precisely because it has not been production load-tested nor specifically cost-engineered. And obviously a “Hello, World” script is not testing much in the way of real work. I really set out to understand if we could run arbitrary containers in a GitLab Fargate setup (and we can) and then got curious about what parallel job scaling might look like with Fargate (and it looks good). The Kubernetes Runner executor has many, many available customizations, and it is likely that scaling a production-loaded implementation on EKS will reveal the need to tune more of these parameters. \n\n#### **Collaborative contribution challenges**\n\nHere are some ideas for further collaborative work on this project:\n\n- To push the limits, create a configuration that can scale to 1000 simultaneous jobs.\n- An aws-logging config map that uploads runner pod logs to AWS CloudWatch.\n- A cluster configuration where runner managers and everything that is not a runner job run on non-Fargate nodes – if and only if it will be cheaper than Fargate running 24 x 7.\n- A Fargate Spot configuration (sketched below). It’s important that the compute type be noted as a runner tag, and that the same cluster has non-spot instances, because some jobs should not run on spot compute and the decision whether to do so should be available to the GitLab CI developer who is creating a pipeline.\n\n#### Other runner scaling initiatives\n\nWhile GitLab is building the Next Runner Auto-scaling Architecture, [Kubernetes refinements are not a part of this architectural initiative](https://docs.gitlab.com/ee/architecture/blueprints/runner_scaling/#proposal).\n\n#### Everyone can contribute\n\nThis tutorial, as well as code for additional examples, will be maintained as open source as a GitLab Alliances Solution, and we’d love to have your contributions as you iterate and discover the configurations necessary for your real-world scenarios. This tutorial is in a group wiki and the code will be in the projects under that group here: [AWS Guided Explorations for EKS Runner Configurations](https://gitlab.com/guided-explorations/aws/eks-runner-configs/gitlab-runner-eks-fargate/-/blob/main/README.md). 
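To make the Fargate Spot idea concrete, here is a hedged sketch of how tag-based routing might look from the pipeline author's side. The `eks-fargate-spot` tag and the second runner deployment behind it are hypothetical - building them is exactly the challenge above:\n\n```yaml\n# Hypothetical: assumes a second runner registered with the tag 'eks-fargate-spot'\n# whose job pods are scheduled onto Fargate Spot capacity.\nretry-safe-job:\n  stage: build\n  tags: [eks-fargate-spot] # interruption-tolerant work opts into spot compute\n  script:\n    - echo \"safe to re-run if the spot capacity is reclaimed\"\n\nmust-not-interrupt-job:\n  stage: build\n  tags: [eks-fargate] # the on-demand runner installed in this tutorial\n  script:\n    - echo \"runs on on-demand Fargate\"\n```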
\n\nPhoto by [Jeremy Lapak](https://unsplash.com/@jeremy_justin?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/s/photos/runner?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[707,9,683],{"slug":794,"featured":6,"template":688},"eks-fargate-runner","content:en-us:blog:eks-fargate-runner.yml","Eks Fargate Runner","en-us/blog/eks-fargate-runner.yml","en-us/blog/eks-fargate-runner",{"_path":800,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":801,"content":807,"config":813,"_id":815,"_type":14,"title":816,"_source":16,"_file":817,"_stem":818,"_extension":19},"/en-us/blog/environment-friction-cycle",{"title":802,"description":803,"ogTitle":802,"ogDescription":803,"noIndex":6,"ogImage":804,"ogUrl":805,"ogSiteName":673,"ogType":674,"canonicalUrls":805,"schema":806},"How GitLab eliminates value stream friction in dev environments","It is important to have the complete picture of scaled effects in view when designing automation.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682507/Blog/Hero%20Images/sandeep-singh-3KbACriapqQ-unsplash.jpg","https://about.gitlab.com/blog/environment-friction-cycle","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How GitLab can eliminate the massive value stream friction of developer environment provisioning and cleanup\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Darwin Sanoy\"}],\n        \"datePublished\": \"2022-11-17\",\n      }",{"title":808,"description":803,"authors":809,"heroImage":804,"date":810,"body":811,"category":681,"tags":812},"How GitLab can eliminate the massive value stream friction of developer environment provisioning and cleanup",[703],"2022-11-17","\n\nA strong DevOps value stream drives developer empowerment as far left as possible. In GitLab, this is embodied in per-feature branch merge requests that are rich with automated code quality and defect information - including not only findings - but automated remediation capabilities and collaboration. Some defects and code quality issues can only be found by analyzing a running copy of the application, including DAST, IAST, fuzzing and many others. GitLab has built a fully automated, seamless developer environment lifecycle management approach right into the developer experience. In fact, it’s so seamlessly built-in, it can be easy to overlook how critical developer environment lifecycle management is. This article will highlight why and how GitLab adds value using developer environment automation. In addition, while GitLab provides out of the box developer environment lifecycle management for Kubernetes, this article demonstrates an approach and a working example of how to extend that capability to other common cloud-based application framework PaaS offerings.\n\n## Provisioning of development environments is generally a negative feedback loop\n\nIn a prior job, I worked on a DevOps transformation team that supported multiple massive shared development environments in AWS. They were accessible to more than 4,000 developers working to build more than 100 SaaS applications and utility stacks. In the journey to the AWS Cloud, each development team took ownership of the automation required to deploy their applications. 
Since developers were able to self-service, over time this solved the problem of development friction generated by waiting for environments to be provisioned for testing, feature experiments, integration experiments, etc. \n\nHowever, the other half of the problem then ballooned - environment sprawl - with an untold number of environments idling without management and without knowledge of when they could be torn down. Over time, the development environment cost became a significant multiple of production costs. The cloud has solved the environment provisioning bottlenecks caused by hardware acquisition and provisioning, but this can also inadvertently fuel the high costs of unmanaged sprawl. This problem understandably causes organizations to raise administrative barriers to new development environments.\n\nIn many organizations this becomes a vicious cycle - most especially if developer environments are operated by a different team, or worse, on an independent budget. Environment justification friction usually comes quickly after discovering the true cost of the currently running environments. Developers then have to justify the need for each new environment request, and they have to make the gravest of promises to disband the environment as soon as they are done. Another source of friction arises when a separate group is tasked with cost controls and environment provisioning and cleanup. This introduces administrative and work-queueing delays. Coordination friction also crops up because an accurate understanding of exactly what is needed for an environment can be challenging to convey. When mistakes are made or key information is missing, developers must go back and forth on support requests to get the configuration completely correct.\n\n## Partial automation can worsen the problem\n\nThat’s the first half of the environment lifecycle, but as I mentioned, even if that is fully automated and under the control of developers, the other half of the feedback loop comes into play. When a given development environment has fulfilled its initial justification, the team does not want to destroy it because environments are so hard to justify and create. Then the sprawl starts and, of course, the barriers to new environments are raised even higher. This is a classic negative feedback loop.\n\nSystems theory shows us that sometimes there are just a few key factors in stopping or even reversing a negative feedback loop. Let’s take this specific problem apart and talk about how GitLab solves for it.\n\n## Treat developer environments as a complete lifecycle\n\nIn the prior example, it is evident that by leaving out the last stage of the environment lifecycle - retirement or teardown - we still end up with a negative feedback loop. Removing provisioning friction actually makes the problem worse if retirement friction is not also addressed at the same time. Solutions to this problem need to address the entire lifecycle to avoid impacting value stream velocity. Neglecting or avoiding the retirement stage of a lifecycle is a common problem across all types of systems. In contrast, by addressing the entire lifecycle we can transform it from a negative feedback loop into a managed lifecycle.\n\n## The problems of who and when\n\nBuried inside the insidious friction loop are a couple of key coordination problems we’ll call “Who and When.” Basically, \"Who\" should create environments and \"When\" should they be created to ensure reasonable cost optimization? 
Then again, _Who_ should clean up environments and _When_ do you know with certainty that the environment is no longer needed? Even with highly collaborative teams working hard together for maximum business value, these questions present a difficulty that frequently results in environments running for a long time before they are used and after they are no longer needed. The knowledge of appropriate timing plays a critical role in gaining control over this source of friction.\n\n## The problem of non-immutable development environments\n\nFriction in environment lifecycle management creates a substantial knock-on problem associated with long-lived environments. Long-lived environments that are updated multiple times for various independent projects start to accumulate configuration rot; they become snowflakes with small changes that are left over from non-implemented experiments, software or configuration removals, and other irrelevant bits and pieces. Immutability is the practice of not doing “in place” updates to a computing element, but rather destroying it and replacing it with a fresh, built-from-scratch element. Docker has made this concept widely accepted and effective in production workloads, but development environments frequently do not have this attribute because they were automated without the design constraint of immutability, so they are updated in place for reuse by various initiatives. If the environment lifecycle is not fully automated, it is impossible to make environments workable on a per-feature-branch basis.\n\n## The problem of non-isolated development environments \n\nWhen environments are manually provisioned, or when there is a lot of cost or administrative friction to setting them up, environment sharing becomes more commonplace. This creates sharing contention at many levels. Waiting for a turn to use an environment, pressure to complete work quickly so others can use the environment, and restrictions on the types of changes that can be made to shared environments are just some of the common sharing contention elements that arise. If environments can be isolated, then sharing contention friction evaporates. Pushing this to the extreme of per-feature-branch granularity brings many benefits, but is also difficult.\n\n## Effect on the development value stream\n\nThe effect that a friction-filled environment lifecycle has on the value stream can be immense - how many stories have you heard of projects waylaid for weeks or months while waiting on environment provisioning? What about defects shipped to production because a shared environment had leftover configuration during testing? Frequently this friction is tolerated in the value stream because no one will argue that unlimited environment sprawl is a wise use of company resources. We all turn off the lights in our home when we are no longer using a room, and it is good business sense and good stewardship not to leave idle resources running at work.\n\nThe concept of good stewardship of planetary resources is actually becoming an architectural-level priority in the technology sector. 
This is evidenced in AWS’s [introduction of the “Sustainability” pillar to the AWS Well-Architected principles in 2021](https://aws.amazon.com/blogs/aws/sustainability-pillar-well-architected-framework/) and many other green initiatives in the technology sector.\n\nIt’s imperative that efforts to improve the development value stream consider whether developer environment management friction is hampering the breadth, depth, and velocity of product management and software development.\n\n## Seamless and fully automated review environment lifecycle management\n\nWhat if this negative feedback loop could be stopped? What if new environments were seamlessly and automatically created right at the moment they were needed? What if developers were completely happy to immediately tear down an environment when they were done, because it takes no justification nor effort on their part to create a new one at will?\n\nEnter GitLab Review Environments!\n\nGitLab review apps are created by the developer action of creating a new branch. No humans are involved, as the environment is deployed while the developer is still mulling over their first code changes on the branch.\n\nAs the developer pushes code updates, the review app is automatically updated with the changes, and all quality checks and security scans are run so that the developer knows if they introduced a vulnerability or quality defect. This happens within the shortest possible amount of time after the defect was introduced.\n\nWhen the developer merges their code, the review app is automatically torn down.\n\nThis seamless approach to developer environment provisioning and cleanup addresses enough of the critical factors in the negative feedback loop that it is effectively nullified.\n\nConsider:\n\n- Developer environment provisioning and cleanup are fully automated, transparent, developer-initiated activities. They do not consume people or human process resources, which are always vastly slower and more expensive than technology solutions.\n- Provisioning and cleanup timing are exactly synchronized with the developer’s need, preventing inefficiencies in idle time before or after environment usage.\n- They are immutable on a per-branch basis - a new branch always creates a new environment from a fresh copy of the latest code.\n- They are isolated - no sharing contention and no mixing of varying configuration.\n- They treat developer environments as a lifecycle.\n\nIt is so transparent that some developers may not even realize that their feature branch has an isolated environment associated with it.\n\n## Hard dollar costs are important and opportunity costs are paramount\n\nGitLab environments positively contribute to the value stream in two critical ways. First, the actual waste of idle machines is dramatically reduced. More importantly, however, all the human processes that end up being applied to managing that waste also disappear. Machines running in the cloud are only lost money. Inefficient use of people’s time carries a high dollar cost, but it also carries a higher opportunity cost. There are so many value-generating activities people can do when their time is unencumbered by cost-control administration.\n\n## Multiplying the value stream contributions of developer review environments\n\nDeveloper environment friction is an industry-wide challenge, and GitLab nearly eliminates the core problems of this feedback cycle. 
However, GitLab has also gone way beyond simply addressing this problem by creating a lot of additional value through seamless per-feature branch developer environments.\n\nHere is a visualization of where dynamic review environments plug into the overall GitLab developer workflow.\n\n![](https://about.gitlab.com/images/blogimages/environment-friction-lifecycle/gitlabenvironmentlifecycle.png)\n\n**Figure 1: Review environments with AWS Cloud Services**\n\nFigure 1 is showing GitLab’s full development cycle support with a little art of the possible thrown in around interfacing with AWS deployment services. The green dashed arrow indicates that GitLab deploys a review environment when the branch is first created. Since the green arrow is part of the developer's iteration loop, the green arrow is also depicting that review app updates are done on each code push. \n\nThe light purple box is showing that the iterative development and CI checks are all within the context of a merge request (MR), which provides a Single Pane of Glass (SPOG) for all quality checks, vulnerabilities and collaboration. Finally, when the merge is done, the review environment is cleaned up. The feature branch merge request is the furthest left that visibility and remediation can be shifted. GitLab’s shifting of this into the developer feature branch is what gives developers a semi-private opportunity to fix any quality or security findings with the specific code they have added or updated.\n\nOne other thing to note here is that when GitLab CD code is engineered to handle review environments, it is reused for all other preproduction and production environments. The set of AWS icons after the “Release” icon would be using the same deployment code. However, if the GitLab CD code is engineered only around deploying to a set of static environments, it is not automatically capable of review environments. Review environment support is a superset of static environment support.\n\n## Review environments enable a profound shift left of visibility and remediation\n\nAt GitLab “shift left” is not just about “problem visibility” but also about “full developer enablement to resolve problems” while in-context. GitLab merge requests provide critical elements that encourage developers to get into a habit of defect remediation:\n\n- **Context** - Defect and vulnerability reporting is only for code the developer changed in their branch and is tracked by the merge request (MR) for that branch.\n- **Responsibility** - Since MRs and branches are associated to an individual, it is evident to the developer (and the whole team) what defects were introduced or discovered by which developers.\n- **Timing** - Developers become aware of defects nearly as soon as they are introduced, not weeks or months after having integrated with other code. If they were working on a physical product, we can envision that all the parts are still on the assembly bench.\n- **Visibility - Appropriately Local, Then Appropriately Global** - Visibility of defects is context specific. While a developer has an open MR that is still a work in progress, they can be left alone to remedy accidentally-introduced defects with little concern from others because the visibility is local to the MR. However, once they seek approvals to merge their code, then the approval process for the MR will cause the visibility of any unresolved defects and vulnerabilities to come to the attention of everyone involved in the approval process. 
This ensures that oversight happens with just the right timing - not too early and not forgotten. This makes a large-scale contribution to human efficiency in the development value stream.\n- **Advisement** - As much as possible, GitLab integrates tools and advice right into the feature branch MR context where the defects are visible. Developers are given full vulnerability details and can take just-in-time training on specific vulnerabilities. \n- **Automated Remediation** - Developers can choose to apply auto-remediations when they are available.\n- **Collaboration** - They can use MR comments and new issues to collaborate with teammates throughout the organization on resolving defects of all types.\n\nHaving seamless, effortless review environments at a per-feature-branch granularity is a critical ingredient in GitLab’s ability to maximize the shift left of the above developer capabilities. This is most critical in the developer checks that require a running copy of the application, which is provided by the review environments. These checks include things such as DAST, IAST, API fuzzing, and accessibility testing. The industry is also continuing to multiply the types of defect scanners that require an actively running copy of the application.\n\n## Extending GitLab review environments to other cloud application framework PaaS\n\nSo you may be thinking, “I love GitLab review environments, but not all of our applications are targeting Kubernetes.” It is true that the out-of-the-box showcasing of GitLab review environments depends on Kubernetes. One of the key reasons for this is that Kubernetes provides an integrated declarative deployment capability known as deployment manifests. The environment isolation capability, known as namespaces, also provides a critical capability. GitLab wires these Kubernetes capabilities up to a few key pieces of GitLab CD to accomplish the magic of isolated, per-feature branch review environments.\n\nAs far as I know, there is no formal or de facto industry term for what I’ll call “Cloud Application Framework PaaS.” Cloud-provided PaaS can be targeted at various “levels” of the problem of building applications. For instance, primitive components such as AWS ELB address the problem of application load balancing by providing a variety of virtual, cloud-scaling, and secured appliances that you can use as a component of building an application. Another example is [AWS Cognito](https://aws.amazon.com/cognito/) to help with providing user login and profile services to an application build.\n\nHowever, there are also cloud PaaS offerings that seek to solve the entire problem of rapid application building and maintenance. These are services like AWS Amplify and AWS AppRunner. These services frequently knit together primitive PaaS components (such as those described above) into a composite that attempts to accelerate the entire process of building applications. Frequently, these PaaS also include special CLIs or other developer tools that attempt to abstract the creation, maintenance, and deployment of an Infrastructure as Code layer. They also tend to be [GitOps](/topics/gitops/)-oriented by storing this IaC in the same repository as the application code, which enables full control over deployments via Git controls such as branches and merge requests.\n\nThis approach relieves developers of early-stage applications from having to learn IaC or hire IaC operations professionals too early. Basically, it allows them to avoid prematurely optimizing for onboarding IaC skills. 
\n\n## Extending GitLab review environments to other cloud application framework PaaS\n\nSo you may be thinking, “I love GitLab review environments, but not all of our applications are targeting Kubernetes.” It is true that the out-of-the-box showcasing of GitLab review environments depends on Kubernetes. One of the key reasons for this is that Kubernetes provides an integrated declarative deployment capability known as deployment manifests. The environment isolation capability, known as namespaces, also provides a critical building block. GitLab wires these Kubernetes capabilities up to a few key pieces of GitLab CD to accomplish the magic of isolated, per-feature branch review environments.\n\nAs far as I know, there is no formal or de facto industry term for what I’ll call “Cloud Application Framework PaaS.” Cloud-provided PaaS can be targeted at various “levels” of the problem of building applications. For instance, primitive components such as AWS ELB address the problem of application load balancing by providing a variety of virtual, cloud-scaling and secured appliances that you can use as a component of building an application. Another example is [AWS Cognito](https://aws.amazon.com/cognito/), which helps with providing user login and profile services to an application build.\n\nHowever, there are also cloud PaaS offerings that seek to solve the entire problem of rapid application building and maintenance. These are services like AWS Amplify and AWS App Runner. They frequently knit together primitive PaaS components (such as those described above) into a composite that attempts to accelerate the entire process of building applications. Frequently these PaaS also include special CLIs or other developer tools that abstract the creation, maintenance and deployment of an Infrastructure as Code (IaC) layer. They also tend to be [GitOps](/topics/gitops/)-oriented by storing this IaC in the same repository as the application code, which enables full control over deployments via Git controls such as branches and merge requests.\n\nThis approach relieves developers of early-stage applications from having to learn IaC or hire IaC operations professionals too early. Basically, it avoids prematurely optimizing for onboarding IaC skills. If the application is indeed successful, it is quite common to outgrow the integrated IaC support provided by these specialized PaaS. However, the evolution is very natural because the managed IaC can simply start to be developed by specialists.\n\nThe distinction of cloud application framework PaaS is important when understanding where GitLab can create compound value with dynamic review environments. I will refer to this kind of PaaS - one that tries to solve the entire problem of building applications - as “Cloud Application Framework PaaS.”\n\nSo we have a set of GitLab interfaces and conventions for implementing seamless developer review environments, we have non-Kubernetes cloud application frameworks that provide declarative deployment interfaces, and we can indeed make them work together! Interestingly, it is all done in GitLab CI YAML, which means that once you see the art of the possible, you can start implementing dynamic review environment lifecycle management for many custom environment types with existing GitLab features. \n\n## A working, non-Kubernetes example of dynamic review environments in action\n\n![](https://about.gitlab.com/images/blogimages/environment-friction-lifecycle/CloudFormationDeployAnimatedGif.gif)\n\n**Figure 2: Working CD example of review environments for AWS CloudFormation**\n\nFigure 2 shows the details of an actual non-Kubernetes working example called CloudFormation AutoDeploy With Dynamic Review Environments. This project enables any AWS CloudFormation template to be deployed. It specifically deploys an isolated stack whenever a review branch is created and destroys that environment when the branch is merged. \n\nHere are some of the key design constraints and best practices that allow it to support automated review environments:\n\n- **The code is implemented as an include.** Notice that the main [.gitlab-ci.yml](https://gitlab.com/guided-explorations/aws/cloudformation-deploy/-/blob/main/.gitlab-ci.yml) file contains only variables applicable to this project, followed by the inclusion of `Deploy-AWSCloudFormation.gitlab-ci.yml`. This allows you to treat the CloudFormation integration as a managed, shared include to be improved and updated centrally. If the stress of maintaining backward compatibility for a shared dependency is too much, you can encourage developers to make a copy of this file to essentially version-peg it to their project.\n\n- **Avoids Conflict with Auto DevOps CI Stage Names** - The [standard stages of Auto DevOps are here](https://gitlab.com/gitlab-org/gitlab/-/blob/master/lib/gitlab/ci/templates/Auto-DevOps.gitlab-ci.yml#L70). This constraint allows the auto deploy template to be leveraged. 
\n\n- **Creates and Sequences Custom Stages as Necessary** - For instance, you can see we’ve added a `create-changeset` stage and jobs.\n\n- The `deploy-review` job and its `environment:` section must have a very specific construction. Let’s look at the important details:\n\n  ```\n    rules:\n      - if: '$CI_COMMIT_BRANCH == \"main\"'\n        when: never\n      - if: '$REVIEW_DISABLED'\n        when: never\n      - if: '($CI_COMMIT_TAG || $CI_COMMIT_BRANCH) && $REQUIRE_CHANGESET_APPROVALS == \"true\"'\n        when: manual\n      - if: '($CI_COMMIT_TAG || $CI_COMMIT_BRANCH) && $REQUIRE_CHANGESET_APPROVALS != \"true\"'\n    artifacts:\n      reports:\n        dotenv: envurl.env\n    environment:\n      name: review/$CI_COMMIT_REF_SLUG\n      url: $DYNAMIC_ENVIRONMENT_URL\n      on_stop: stop_review\n  ```\n\n  - `rules:` ensure this job only runs when we are not on the main branch. The main branch implements long-lived staging and production environments.\n  - `artifacts:reports:dotenv` allows variables populated during a CI job to become pipeline-level variables. Its most critical role in this job is to allow the URL retrieved from the CloudFormation Outputs to be populated into the variable `DYNAMIC_ENVIRONMENT_URL`. The file `envurl.env` would have at least the line `DYNAMIC_ENVIRONMENT_URL={url-from-cloudformation}` in it. You can see this in the job code as `echo \"DYNAMIC_ENVIRONMENT_URL=${STACK_ENV_URL}\" >> envurl.env`.\n  - `environment:name:` uses the Auto Deploy convention of placing review apps under the top-level environment group called `review`. The reference `$CI_COMMIT_REF_SLUG` ensures that the branch (or tag) name is used, but with all illegal characters removed. By your development convention, the environment name should become a part of the IaC constructs that ensure both uniqueness and identifiability by this pipeline. In GitLab's standard auto deploy for Kubernetes, this is done by constructing a namespace that contains the name in this provided parameter. In CloudFormation we make it part of the stack name. The value here is exposed in the job as the variable `${ENVIRONMENT}`.\n  - `environment:url:` it is not self-evident here, but the variable `DYNAMIC_ENVIRONMENT_URL` was populated by the deployment job and written to the file `envurl.env` so that it contains the right value at this point. This gives the GitLab “Environments” page a clickable link to visit the environment. The URL is also used by DAST and other live application scan engines to find and scan the isolated environment.\n  - `environment:on_stop:` in the deploy-review job maps to the job named `stop_review`. This is the magic sauce behind automatic environment deletion when a feature branch is merged. `stop_review` must be written with the correct commands to accomplish the teardown; a sketch of such a job appears below.\n\n## A reusable engineering pattern\n\nThis CloudFormation pattern serves as a higher-level pattern of how GitLab review environments can be adapted to any other cloud “Application Level PaaS.” This is a term I use to indicate a cloud PaaS that is abstracted highly enough that developers think of it as “a place to deploy applications.” Perhaps the best way to understand it is to contrast it with PaaS that does not claim to serve as an entire application platform: cloud-based load balancers, for example, perform a utility function for applications but are not a place to build an entire cloud application. 
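\n\nFor completeness, here is a minimal sketch of what the paired `stop_review` teardown job can look like in the CloudFormation case. The `cleanup` stage name and the `review-${CI_COMMIT_REF_SLUG}` stack-naming convention are assumptions for illustration; the working example project wires these details through its shared include.\n\n```yaml\nstop_review:\n  stage: cleanup                # assumed stage name; use whatever fits your pipeline\n  rules:\n    - if: '$CI_COMMIT_BRANCH == \"main\"'\n      when: never\n    - if: '$REVIEW_DISABLED'\n      when: never\n    - if: '$CI_COMMIT_TAG || $CI_COMMIT_BRANCH'\n      when: manual              # GitLab triggers this job automatically when the branch is merged or deleted\n  variables:\n    GIT_STRATEGY: none          # the branch may already be gone when this job runs\n  script:\n    # Tear down the isolated stack created by deploy-review (assumed naming convention).\n    - aws cloudformation delete-stack --stack-name \"review-${CI_COMMIT_REF_SLUG}\"\n    - aws cloudformation wait stack-delete-complete --stack-name \"review-${CI_COMMIT_REF_SLUG}\"\n  environment:\n    name: review/$CI_COMMIT_REF_SLUG   # must match the deploy-review environment name\n    action: stop                       # marks the environment as stopped in GitLab\n  allow_failure: true\n```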
\n\n## Application PaaS for abstracting IaC concerns for developers\n\nGitLab auto deploy combines well with cloud application framework PaaS that have a disposition toward developer productivity, reducing or eliminating the IaC management required of developers. AWS Amplify has such productivity support in the form of a developer-specific CLI, which allows IaC to be authored and updated in the same Git repository where the application code is stored. Adding an entire scaling database PaaS is as simple as running a single CLI command.\n\nGenerally, such application PaaS not only generate and help maintain IaC through highly abstracted CLI or UI actions, they also contain a single `deploy` command, which is easily combined with a GitLab auto deploy template for working with that particular application PaaS.\n\n## Wrap up\n\nHopefully this article has helped you understand that:\n\n- GitLab already contains a super valuable feature that automates developer environment lifecycle management.\n- It is critical in addressing a key friction in the DevOps value chain.\n- It can be extended beyond Kubernetes to other cloud application framework PaaS offerings.\n\n\nPhoto by [Sandeep Singh](https://unsplash.com/@funjabi?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/s/photos/friction?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n",[707,9,683],{"slug":814,"featured":6,"template":688},"environment-friction-cycle","content:en-us:blog:environment-friction-cycle.yml","Environment Friction Cycle","en-us/blog/environment-friction-cycle.yml","en-us/blog/environment-friction-cycle",{"_path":820,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":821,"content":827,"config":834,"_id":836,"_type":14,"title":837,"_source":16,"_file":838,"_stem":839,"_extension":19},"/en-us/blog/fast-python-flask-server-deployment-with-gitlab-google-cloud",{"title":822,"description":823,"ogTitle":822,"ogDescription":823,"noIndex":6,"ogImage":824,"ogUrl":825,"ogSiteName":673,"ogType":674,"canonicalUrls":825,"schema":826},"Fast Python Flask server deployment with GitLab + Google Cloud","This tutorial shows how to use GitLab’s Google Cloud integration to deploy a Python Flask server in less than 10 minutes, helping developers become more independent and efficient.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098427/Blog/Hero%20Images/Blog/Hero%20Images/blog-image-template-1800x945_fJKX41PJHKCfSOWw4xQxm_1750098427691.png","https://about.gitlab.com/blog/fast-python-flask-server-deployment-with-gitlab-google-cloud","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Fast Python Flask server deployment with GitLab + Google Cloud\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Noah Ing\"},{\"@type\":\"Person\",\"name\":\"Jerez Solis\"}],\n        \"datePublished\": \"2024-11-04\",\n      }",{"title":822,"description":823,"authors":828,"heroImage":824,"date":830,"body":831,"category":681,"tags":832},[747,829],"Jerez Solis","2024-11-04","Deploying an application to the cloud often requires assistance from production or DevOps engineers. GitLab's Google Cloud integration empowers developers to handle deployments independently. In this tutorial, you'll learn how to deploy a Python Flask server to Google Cloud in less than 10 minutes. 
Whether you’re a solo developer or part of a large team, this setup allows you to deploy applications efficiently.\n\nYou'll learn how to:\n\n- Create a new project in GitLab\n- Create a Flask server utilizing `main.py`\n- Utilize the Google Cloud integration to create a Service account\n- Utilize the Google Cloud integration to create Cloud Run via a merge request\n- Access your newly deployed Flask server\n- Clean up your environment\n\n## Prerequisites\n- Owner access on a Google Cloud Platform project\n- Working knowledge of Python\n- Working knowledge of GitLab CI\n- 10 minutes\n\n## Step-by-step Python Flask server deployment to Google Cloud\n\n**1. Create a new project in GitLab.**\n\nWe decided to call our project \"python-flask-cloud-run\" for simplicity.\n\n![python flask server - create a new project in GitLab](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098438/Blog/Content%20Images/Blog/Content%20Images/image6_aHR0cHM6_1750098438036.png)\n\n**2. Create a Flask server utilizing the `main.py` demo.**\n\nFind the `main.py` demo here: [https://gitlab.com/demos/applications/python-flask-cloud-run](https://gitlab.com/demos/applications/python-flask-cloud-run).\n\n```python\nimport os\n\nfrom flask import Flask\n\napp = Flask(__name__)\n\n@app.route(\"/\")\ndef hello_world():\n    \"\"\"Example Hello World route.\"\"\"\n    name = os.environ.get(\"NAME\", \"World\")\n    return f\"Hello {name}!\"\n\nif __name__ == \"__main__\":\n    app.run(debug=True, host=\"0.0.0.0\", port=int(os.environ.get(\"PORT\", 8080)))\n```\n\n**3. Create a `requirements.txt` with the following dependencies.**\n\n```\nFlask==3.0.3\ngunicorn==22.0.0\nWerkzeug==3.0.3\n```\n\n**4. Utilizing the Google Cloud integration, create a Service account.**\n\nNavigate to **Operate > Google Cloud > Create Service account**.\n\n![python flask server - create service account](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098438/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750098438037.png)\n\n**5. Also configure the region you would like the Cloud Run instance to deploy to.**\n\n![python flask server - configure the region](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098438/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750098438038.png)\n\n**6. Utilizing the Google Cloud integration, configure Cloud Run via merge request.**\n\n![python flask server - deployments](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098438/Blog/Content%20Images/Blog/Content%20Images/image7_aHR0cHM6_1750098438041.png)\n\n**7. This will open a merge request. Immediately merge this merge request.**\n\n![python flask server - enable deployments to Cloud Run](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098438/Blog/Content%20Images/Blog/Content%20Images/image10_aHR0cHM6_1750098438043.png)\n\n**Note:** `GCP_PROJECT_ID`, `GCP_REGION`, `GCP_SERVICE_ACCOUNT`, and `GCP_SERVICE_ACCOUNT_KEY` will all be automatically populated from the previous steps.\n\n![python flask server - variables](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098438/Blog/Content%20Images/Blog/Content%20Images/image9_aHR0cHM6_1750098438044.png)\n\n**8. Voila! 
Check your pipeline and you will see you have successfully deployed to Google Cloud Run utilizing GitLab CI.**\n\n![python flask server - update dockerfile](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098438/Blog/Content%20Images/Blog/Content%20Images/image4_aHR0cHM6_1750098438045.png)\n\n\u003Cbr>\u003C/br>\n\n![python flask server - dockerfile](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098438/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750098438046.png)\n\n**9. Click the Service URL to view your newly deployed Flask server.**\n\nNavigate to **Operate > Environments** to see a list of deployments for your environments.\n\n![python flask server - deployments list](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098438/Blog/Content%20Images/Blog/Content%20Images/image8_aHR0cHM6_1750098438047.png)\n\nBy clicking on the environment called **main**, you’ll be able to view a complete list of deployments specific to that environment.\n\n![python flask server - main job listing](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098438/Blog/Content%20Images/Blog/Content%20Images/image5_aHR0cHM6_1750098438048.png)\n\n## Next steps\n\nTo get started with developing your Flask application, try adding another endpoint. For instance, in your `main.py` file, you can add a **/bye** endpoint as shown below:\n\n```python\n@app.route(\"/bye\")\ndef goodbye_world():\n    \"\"\"Example Goodbye route.\"\"\"\n    name = os.environ.get(\"NAME\", \"World\")\n    return f\"Goodbye {name}!\"\n```\n\nPush the changes to the repo, and watch the `deploy-to-cloud-run` job deploy the updates. Once it’s complete, go back to the Service URL and navigate to the **/bye** endpoint to see the new functionality in action.
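\n\nIf you are curious what that job is doing under the hood, a manually authored equivalent could look roughly like the sketch below, which uses the Google-maintained Cloud Run CI/CD component. The service name and image reference are illustrative assumptions, not the exact pipeline the integration generates for you:\n\n```yaml\ninclude:\n  - component: gitlab.com/google-gitlab-components/cloud-run/deploy-cloud-run@main\n    inputs:\n      stage: deploy\n      project_id: $GCP_PROJECT_ID        # populated by the integration in step 7\n      region: $GCP_REGION                # populated by the integration in step 7\n      service: \"python-flask-cloud-run\"  # illustrative service name\n      image: $CI_REGISTRY_IMAGE:$CI_COMMIT_SHORT_SHA  # assumes an image built earlier in the pipeline\n```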
\n\n## Clean up\n\nTo prevent incurring charges on your Google Cloud account for the resources used in this tutorial, you can either delete the specific resources or delete the entire Google Cloud project. For detailed instructions, refer to the [cleanup guide](https://docs.gitlab.com/ee/tutorials/create_and_deploy_web_service_with_google_cloud_run_component/#clean-up).\n\n> For more DevSecOps capabilities, [start a free 60-day trial of GitLab Ultimate and GitLab Duo](https://gitlab.com/-/trials/new?glm_content=default-saas-trial&glm_source=about.gitlab.com/blog/%2F).",[684,833,751,9],"cloud native",{"slug":835,"featured":91,"template":688},"fast-python-flask-server-deployment-with-gitlab-google-cloud","content:en-us:blog:fast-python-flask-server-deployment-with-gitlab-google-cloud.yml","Fast Python Flask Server Deployment With Gitlab Google Cloud","en-us/blog/fast-python-flask-server-deployment-with-gitlab-google-cloud.yml","en-us/blog/fast-python-flask-server-deployment-with-gitlab-google-cloud",{"_path":841,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":842,"content":848,"config":856,"_id":858,"_type":14,"title":859,"_source":16,"_file":860,"_stem":861,"_extension":19},"/en-us/blog/google-cloud-integrations-for-secure-cloud-run-deployments-at-gitlab",{"title":843,"description":844,"ogTitle":843,"ogDescription":844,"noIndex":6,"ogImage":845,"ogUrl":846,"ogSiteName":673,"ogType":674,"canonicalUrls":846,"schema":847},"Google Cloud integrations for secure Cloud Run deployments at GitLab","This tutorial demonstrates how to use GitLab’s Google Artifact Management integration to deploy to Google Cloud Run, a serverless runtime for containers application.\n","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099336/Blog/Hero%20Images/Blog/Hero%20Images/blog-image-template-1800x945_fJKX41PJHKCfSOWw4xQxm_1750099336757.png","https://about.gitlab.com/blog/google-cloud-integrations-for-secure-cloud-run-deployments-at-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Google Cloud integrations for secure Cloud Run deployments at GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Regnard Raquedan\"},{\"@type\":\"Person\",\"name\":\"Matt Genelin\"}],\n        \"datePublished\": \"2025-01-15\",\n      }",{"title":843,"description":844,"authors":849,"heroImage":845,"date":852,"body":853,"category":685,"tags":854},[850,851],"Regnard Raquedan","Matt Genelin","2025-01-15","*This tutorial is from a recent Arctiq, GitLab, and Google in-person workshop. The goal was to explore common security challenges faced by organizations as they journey to the cloud.*\n\nThis tutorial will help you learn about the [Google Cloud integrations in GitLab](https://cloud.google.com/docs/gitlab). These features are meant to help accelerate and improve the security of deployments to Google Cloud.\n\n![Google integrations list](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099345/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750099345112.png)\n\n## Prerequisites\n\n1. [Google Cloud project](https://cloud.google.com/resource-manager/docs/creating-managing-projects)  \n2. Appropriate [IAM permissions](https://cloud.google.com/iam/docs/) for security, Artifact Registry, and Cloud Run usage. For this tutorial, ensure you have the \"Owner\" role on the aforementioned project.\n\n## Setting up Workload Identity Federation\n\nIn this step, we configure GitLab to connect Google Cloud's Workload Identity Federation to reduce the need for service accounts and let the two platforms use short-lived credentials on-demand.\n\n1. 
On the left sidebar, select **Search or go to** and find your group or project. If you configure this in a group, the settings apply to all projects within it by default.  \n2. Select **Settings \> Integrations**.  \n3. Select **Google Cloud IAM**.  \n4. Input the Project ID and Project number in the respective fields. This information can be obtained from the Google Cloud console [Welcome](https://console.cloud.google.com/welcome) page of your project.  \n5. Input the desired Pool ID and Provider ID in the respective fields. These are values that you provide and must be unique from other Pool and Provider IDs.  \n6. Copy the generated command and then go to the **Google Cloud console**.  \n7. Run **Cloud Shell** and execute the generated command from the Workload Identity Federation integration page.  \n8. Once successful, the **Google Cloud IAM** integration will be designated as active in the Integrations list of the GitLab project.\n\n## Artifact Registry configuration\n\nAs an alternative to GitLab's own artifact hosting, you can deploy to Google Cloud's Artifact Registry and leverage Google's infrastructure. This section provides steps on how to use GitLab's native integration with Artifact Registry. Note that Workload Identity Federation must already be configured prior to this.\n\n1. At the **Google Cloud** console, go to **Artifact Registry** via search or the main navigation.  \n2. Create a new repository by clicking the **\"+\"** icon. At the creation page, provide a name and keep the **Docker** format and **Standard** mode selected. Select **Region** and choose **us-central1**. Leave the rest at the default settings and click **Create**.  \n3. Once the repository is created and confirmed, go back to your GitLab project.  \n4. In your GitLab project, on the left sidebar, select **Settings > Integrations**. Then select **Google Artifact Registry**.  \n5. Under Enable integration, select the **Active** checkbox, then complete the fields:  \n   * Google Cloud project ID: The ID of the Google Cloud project where your Artifact Registry repository is located.  \n   * Repository name: The name of your Artifact Registry repository.  \n   * Repository location: The location of your Artifact Registry repository. (`us-central1` is assumed.)  \n6. In **Configure Google Cloud IAM policies**, follow the onscreen instructions to set up the IAM policies in Google Cloud. These policies are required to use the Artifact Registry repository in your GitLab project. Select **Save changes**.  \n7. To view your Google Cloud artifacts, on the left sidebar, select **Deploy > Google Artifact Registry**.\n\n## Cloud Run configuration\n\n1. Enable the Cloud Run API, if not done already. Go to **APIs & Services > Enabled APIs & Services**. From there, click **Enable APIs & Services** at the top and search for **Cloud Run Admin API**. Select the search result and enable the API.  \n2. 
Configure the IAM policies in Google Cloud to grant permissions to allow the Cloud Run CI/CD component to deploy to Cloud Run.\n\n```\nGCP_PROJECT_ID=\"\u003CPROJECT ID>\"\nGCP_PROJECT_NUMBER=\"\u003CPROJECT NUMBER>\"\nGCP_WORKLOAD_IDENTITY_POOL=\"\u003CPOOL ID>\"\n\ngcloud projects add-iam-policy-binding ${GCP_PROJECT_ID} \\\n  --member=\"principalSet://iam.googleapis.com/projects/${GCP_PROJECT_NUMBER}/locations/global/workloadIdentityPools/${GCP_WORKLOAD_IDENTITY_POOL}/attribute.developer_access/true\" \\\n  --role='roles/run.admin'\n\ngcloud projects add-iam-policy-binding ${GCP_PROJECT_ID} \\\n  --member=\"principalSet://iam.googleapis.com/projects/${GCP_PROJECT_NUMBER}/locations/global/workloadIdentityPools/${GCP_WORKLOAD_IDENTITY_POOL}/attribute.developer_access/true\" \\\n  --role='roles/iam.serviceAccountUser'\n\ngcloud projects add-iam-policy-binding ${GCP_PROJECT_ID} \\\n  --member=\"principalSet://iam.googleapis.com/projects/${GCP_PROJECT_NUMBER}/locations/global/workloadIdentityPools/${GCP_WORKLOAD_IDENTITY_POOL}/attribute.developer_access/true\" \\\n  --role='roles/cloudbuild.builds.editor'\n```\n\n## Deploy to Cloud Run\n\nIn this section, you will use GitLab's CI/CD components to deploy to Cloud Run, Google Cloud's serverless runtime for containers.\n\n1. Go to the GitLab project and from the list of files in the source code, find `.gitlab-ci.yaml`. Click the **file name** and the single file editor will show up. Click the **Edit** button and select the **Open in Web IDE** option.  \n2. In Web IDE, copy-paste the following code:\n\n```\nstages:\n    - build\n    - upload\n    - deploy\n```\n\nThis code snippet sets up three stages in the pipeline: build, upload, and deploy.\n\n3. The next step is to create two CI/CD variables in the same YAML file:\n\n```\nvariables:\n    GITLAB_IMAGE: $CI_REGISTRY_IMAGE/main:$CI_COMMIT_SHORT_SHA\n    AR_IMAGE: $GOOGLE_ARTIFACT_REGISTRY_REPOSITORY_LOCATION-docker.pkg.dev/$GOOGLE_ARTIFACT_REGISTRY_PROJECT_ID/$GOOGLE_ARTIFACT_REGISTRY_REPOSITORY_NAME/main:$CI_COMMIT_SHORT_SHA\n```\n\nThe first variable, `GITLAB_IMAGE`, denotes the container image that the pipeline creates by default. The second one, `AR_IMAGE`, denotes the location in Google Cloud's Artifact Registry where the container image will be pushed.\n\n4. Next, define the code that will build the container image:\n\n```\nbuild:\n    image: docker:24.0.5\n    stage: build\n    services:\n        - docker:24.0.5-dind\n    before_script:\n        - docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY\n    script:\n        - docker build -t $GITLAB_IMAGE .\n        - docker push $GITLAB_IMAGE\n```\n\nThis code uses [pre-defined CI/CD variables](https://docs.gitlab.com/ee/ci/variables/predefined_variables.html) for the Docker commands.\n\n5. The final step is using two CI/CD components to deploy to Google Cloud. The first component integrates with Artifact Registry and the second is the deployment to Cloud Run:\n\n```\ninclude:\n    - component: gitlab.com/google-gitlab-components/artifact-registry/upload-artifact-registry@main\n      inputs:\n        stage: upload\n        source: $GITLAB_IMAGE\n        target: $AR_IMAGE\n\n    - component: gitlab.com/google-gitlab-components/cloud-run/deploy-cloud-run@main\n      inputs:\n        stage: deploy\n        project_id: \"\u003CPROJECT_ID>\"\n        service: \"tanuki-racing\"\n        region: \"\u003CREGION>\"\n        image: $AR_IMAGE\n```\n\nReplace \u003CPROJECT_ID> with your Google Cloud Project ID. 
Replace \u003CREGION> with the [Google Cloud region](https://cloud.google.com/compute/docs/regions-zones) most appropriate to your location. `us-central1` is assumed.\n\nCommit the changes and push to the main branch. For reference, the final `.gitlab-ci.yaml` should look like this, remembering to replace \u003CPROJECT_ID> and \u003CREGION> with the appropriate values:\n\n```\nstages:\n    - build\n    - upload\n    - deploy\nvariables:\n    GITLAB_IMAGE: $CI_REGISTRY_IMAGE/main:$CI_COMMIT_SHORT_SHA\n    AR_IMAGE: $GOOGLE_ARTIFACT_REGISTRY_REPOSITORY_LOCATION-docker.pkg.dev/$GOOGLE_ARTIFACT_REGISTRY_PROJECT_ID/$GOOGLE_ARTIFACT_REGISTRY_REPOSITORY_NAME/main:$CI_COMMIT_SHORT_SHA\n\nbuild:\n    image: docker:24.0.5\n    stage: build\n    services:\n        - docker:24.0.5-dind\n    before_script:\n        - docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY\n    script:\n        - docker build -t $GITLAB_IMAGE .\n        - docker push $GITLAB_IMAGE\n\ninclude:\n    - component: gitlab.com/google-gitlab-components/artifact-registry/upload-artifact-registry@main\n      inputs:\n        stage: upload\n        source: $GITLAB_IMAGE\n        target: $AR_IMAGE\n\n    - component: gitlab.com/google-gitlab-components/cloud-run/deploy-cloud-run@main\n      inputs:\n        stage: deploy\n        project_id: \"\u003CPROJECT_ID>\"\n        service: \"tanuki-racing\"\n        region: \"\u003CREGION>\"\n        image: $AR_IMAGE\n```\n\n6. Go back to the main GitLab project and view the pipeline that was just initiated. Take note of the stages, which should match the stages defined in step 2.  \n7. Once the pipeline is complete, go to the Google Cloud console and then **Cloud Run** via search or navigation. A new Cloud Run service called `tanuki-racing` should be created.  \n8. Click the **service name** and then go to the **Security** tab. Ensure that the service is set to **Allow unauthenticated invocations**. This will make the deployed app publicly available. The app URL posted on screen is now available and should open a new browser tab when clicked.\n\nBy using GitLab's CI/CD pipelines to build and push a containerized application to Google Artifact Registry, you can see the power of GitLab's AI-powered DevSecOps platform for building secure applications. GitLab also deployed the containerized application to Google Cloud Run as a low-cost running application on the public internet. Using GitLab to orchestrate building an application, pushing a container, and triggering a Cloud Run deployment gives DevOps engineers assurance that secure applications are running on the public-facing internet.\n\n> [Sign up for a 60-day free trial of GitLab Ultimate](https://about.gitlab.com/free-trial/devsecops/) to begin working with these integrations. 
Also, check out our [solutions architecture area](https://about.gitlab.com/blog/tags/solutions-architecture/) for more GitLab and Google Cloud tutorials.",[231,751,855,684,9],"GKE",{"slug":857,"featured":6,"template":688},"google-cloud-integrations-for-secure-cloud-run-deployments-at-gitlab","content:en-us:blog:google-cloud-integrations-for-secure-cloud-run-deployments-at-gitlab.yml","Google Cloud Integrations For Secure Cloud Run Deployments At Gitlab","en-us/blog/google-cloud-integrations-for-secure-cloud-run-deployments-at-gitlab.yml","en-us/blog/google-cloud-integrations-for-secure-cloud-run-deployments-at-gitlab",{"_path":863,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":864,"content":870,"config":876,"_id":878,"_type":14,"title":879,"_source":16,"_file":880,"_stem":881,"_extension":19},"/en-us/blog/how-to-deploy-a-php-app-using-gitlabs-cloud-run-integration",{"title":865,"description":866,"ogTitle":865,"ogDescription":866,"noIndex":6,"ogImage":867,"ogUrl":868,"ogSiteName":673,"ogType":674,"canonicalUrls":868,"schema":869},"How to deploy a PHP app using GitLab's Cloud Run integration","Are you using PHP and want an easy way to deploy your application to Google Cloud? Follow this guide to deploy your app with Google Cloud Run in under 10 minutes.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098264/Blog/Hero%20Images/Blog/Hero%20Images/AdobeStock_519147119_2RafH61mqosMZv8HGAlsUj_1750098264407.jpg","https://about.gitlab.com/blog/how-to-deploy-a-php-app-using-gitlabs-cloud-run-integration","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to deploy a PHP app using GitLab's Cloud Run integration\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Christian Nnachi\"},{\"@type\":\"Person\",\"name\":\"Noah Ing\"}],\n        \"datePublished\": \"2024-12-10\",\n      }",{"title":865,"description":866,"authors":871,"heroImage":867,"date":873,"body":874,"category":681,"tags":875},[872,747],"Christian Nnachi","2024-12-10","Writing PHP application code and ensuring the application is running smoothly in production are often two different skill sets owned by two different engineers. GitLab aims to bridge the gap by enabling the engineer who has written the PHP application code to also deploy it into Google Cloud Platform with little effort. \n\nWhether you own event-driven, long-running services or deploy containerized jobs to process data, Google Cloud Run automatically scales your containers up and down from zero — this means you only pay when your code is running.\n\nIf you are a PHP developer who would like to deploy your application with minimal effort to Google Cloud Platform, this guide will show you how using the GitLab Google Cloud Run integration. \n\n## Overview\n\n- Create a new project in GitLab\n- Set up your PHP application\n- Utilizing the Google Cloud integration, create a Service account\n- Utilizing the Google Cloud integration, configure Cloud Run via merge request\n- Try adding another endpoint\n- Clean up\n\n## Prerequisites\n- Owner access on a Google Cloud Platform project\n- Working knowledge of [PHP](https://www.php.net/manual/en/introduction.php), an open-source, general-purpose scripting language\n- Working knowledge of [GitLab CI](https://about.gitlab.com/topics/ci-cd/#what-is-continuous-integration-ci)\n- 10 minutes\n\n## 1. 
Create a new project in GitLab.\n\nWe decided to call our project `PHP cloud-run` for simplicity.\n\n![PHP cloud-run project](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098287/Blog/Content%20Images/Blog/Content%20Images/image4_aHR0cHM6_1750098287615.png)\n\nThen, create an `index.php` app: [https://gitlab.com/demos/templates/php-cloud-run/-/blob/main/index.php](https://gitlab.com/demos/templates/php-cloud-run/-/blob/main/index.php).\n\n```php\n\u003C?php\n\n$name = getenv('NAME', true) ?: 'World';\necho sprintf('Hello %s!', $name);\n```\n\n## 2. Utilizing the Google Cloud integration, create a Service account.\n\nNavigate to **Operate > Google Cloud > Create Service account**. \n\n![Create Service account screen](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098288/Blog/Content%20Images/Blog/Content%20Images/image10_aHR0cHM6_1750098287616.png)\n\nThen configure the region you would like the Cloud Run instance deployed to.\n\n![Configure region screen](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098288/Blog/Content%20Images/Blog/Content%20Images/image5_aHR0cHM6_1750098287618.png)\n\n## 3. Utilizing the Google Cloud integration, configure **Cloud Run via merge request**.\n\n![Deployment configuration screen](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098288/Blog/Content%20Images/Blog/Content%20Images/image6_aHR0cHM6_1750098287620.png)\n\nThis will open a merge request. Immediately merge this merge request.\n\n![Enable Deployments to Cloud run screen](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098288/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750098287622.png)\n\n**Note:** `GCP_PROJECT_ID`, `GCP_REGION`, `GCP_SERVICE_ACCOUNT`, and `GCP_SERVICE_ACCOUNT_KEY` will all be automatically populated from the previous steps.\n\n![Variables screen](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098288/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750098287624.png)\n\nCheck your pipeline and you will see you have successfully deployed to Google Cloud Run utilizing GitLab CI.\n\n![merge branch screen](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098288/Blog/Content%20Images/Blog/Content%20Images/image7_aHR0cHM6_1750098287625.png)\n\n\u003Cbr>\u003C/br>\n\n![Google Cloud Run deployed with GitLab CI](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098288/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750098287627.png)\n\n## 4. Click the **Service URL** to view your newly deployed PHP application.\n\nIn addition, you can navigate to **Operate > Environments** to see a list of deployments for your environments.\n\n![Environments screen](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098288/Blog/Content%20Images/Blog/Content%20Images/image9_aHR0cHM6_1750098287628.png)\n\nBy clicking on the environment called **main**, you’ll be able to view a complete list of deployments specific to that environment.\n\n![Main environment](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098288/Blog/Content%20Images/Blog/Content%20Images/image8_aHR0cHM6_1750098287631.png)\n\n## 5. Add another endpoint\n\nTo get started with developing your PHP application, try adding another endpoint. 
For example, in your main file, you can add a `/bye` endpoint like this:\n\n```php\n\u003C?php\n\n$name = getenv('NAME', true) ?: 'World';\n\nif ($_SERVER['REQUEST_URI'] == '/bye') {\n    echo sprintf('Goodbye %s!', $name);\n} else {\n    echo sprintf('Hello %s!', $name);\n}\n```\n\nPush the changes to the repo, and watch the `deploy-to-cloud-run` job deploy the updates. Once the job is complete, go back to the Service URL and navigate to the `/bye` endpoint to see the new functionality in action.\n\n## Clean up\n\nTo prevent incurring charges on your Google Cloud account for the resources used in this tutorial, you can either delete the specific resources or delete the entire Google Cloud project. For detailed instructions, refer to the [cleanup guide](https://docs.gitlab.com/ee/tutorials/create_and_deploy_web_service_with_google_cloud_run_component/#clean-up).\n\n> Check out more [easy-to-follow tutorials from our Solutions Architecture team](https://about.gitlab.com/blog/tags/solutions-architecture/).",[9,684,751,231],{"slug":877,"featured":6,"template":688},"how-to-deploy-a-php-app-using-gitlabs-cloud-run-integration","content:en-us:blog:how-to-deploy-a-php-app-using-gitlabs-cloud-run-integration.yml","How To Deploy A Php App Using Gitlabs Cloud Run Integration","en-us/blog/how-to-deploy-a-php-app-using-gitlabs-cloud-run-integration.yml","en-us/blog/how-to-deploy-a-php-app-using-gitlabs-cloud-run-integration",{"_path":883,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":884,"content":890,"config":897,"_id":899,"_type":14,"title":900,"_source":16,"_file":901,"_stem":902,"_extension":19},"/en-us/blog/how-to-gitlab-single-sign-on-with-saml-scim-and-azures-entra-id",{"title":885,"description":886,"ogTitle":885,"ogDescription":886,"noIndex":6,"ogImage":887,"ogUrl":888,"ogSiteName":673,"ogType":674,"canonicalUrls":888,"schema":889},"How-to: GitLab Single Sign-on with SAML, SCIM, and Azure’s Entra ID","Follow this detailed walk-through of the configuration steps required to configure GitLab Single Sign-on, using Microsoft Azure’s Entra ID as the identity provider.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098047/Blog/Hero%20Images/Blog/Hero%20Images/AdobeStock_1097303277_6gTk7M1DNx0tFuovupVFB1_1750098046895.jpg","https://about.gitlab.com/blog/how-to-gitlab-single-sign-on-with-saml-scim-and-azures-entra-id","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How-to: GitLab Single Sign-on with SAML, SCIM, and Azure’s Entra ID\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Rob Jackson\"}],\n        \"datePublished\": \"2025-01-23\",\n      }",{"title":885,"description":886,"authors":891,"heroImage":887,"date":893,"body":894,"category":895,"tags":896},[892],"Rob Jackson","2025-01-23","As organizations increase in size, it becomes increasingly difficult and critical to ensure that the right team members have access to the right groups and projects within their development platform. GitLab offers some powerful methods to manage user access, especially now with [custom roles](https://about.gitlab.com/blog/how-to-tailor-gitlab-access-with-custom-roles/), but performing this at scale through a point-and-click user interface can be frustrating. However, all is not lost. You can use Security Assertion Markup Language (SAML) and System for Cross-domain Identity Management (SCIM) as a solution. (There are moments where I’m grateful for acronyms.) 
\n\nI was researching this topic for a particular customer, and walking through the GitLab documentation on the capabilities, but I never felt like I truly understood the integration. As is often the case, especially when dealing with integrating components, the knowledge from experience far outweighs that gained from reading or watching. In that light, I wanted to share my steps along this path and invite you all to join me. All you need is a free trial of Microsoft Azure Entra ID and GitLab Premium with a top-level group on GitLab.com.  \n\n**Note:** This exercise produces a working integration; however, for production environments there may be necessary deviations. For example, the user account email for the identity provider (Entra ID in this case) will likely not match your GitLab account email. \n\n## Creating the application in Entra ID\n\nFirst, go to the Entra ID admin center. Within the **Applications** area, select **Enterprise Applications**. We’re going to create a new application, choosing the option to create our own application.\n\n![Entra ID application creation flow](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098073/Blog/Content%20Images/Blog/Content%20Images/image13_aHR0cHM6_1750098073325.png)\n\n\u003Ccenter>\u003Ci>Figure 1: Entra ID application creation flow\u003C/i>\u003C/center>\u003Cbr>\n\nWith our new application created, we can start configuring the single sign-on (SSO) parameters for our application. For this task, you may want to have side-by-side browser windows: one window on your Entra ID application, and another window on the SAML settings for your GitLab group. Those settings are located under **Settings**, then **SAML SSO**, on the left side of your GitLab window, as shown in Figure 2. If you don’t see this option, you aren’t in the top-level group, don’t have permission to configure SAML, or don’t have GitLab Premium enabled for that group.\n\n![GitLab SAML configuration](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098073/Blog/Content%20Images/Blog/Content%20Images/image7_aHR0cHM6_1750098073326.png)\n\n\u003Ccenter>\u003Ci>Figure 2: GitLab SAML configuration\u003C/i>\u003C/center>\u003Cbr>\n\nWithin your Entra ID interface, select **Single sign-on** and click the SAML card.\n\n![Entra ID SAML configuration](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098073/Blog/Content%20Images/Blog/Content%20Images/image24_aHR0cHM6_1750098073328.png)\n\n\u003Ccenter>\u003Ci>Figure 3: Entra ID SAML configuration\u003C/i>\u003C/center>\u003Cbr>\n\nWith the side-by-side view, the SAML configuration settings are on the left and the GitLab SSO settings on the right. \n\n![Side-by-side view of Entra ID and GitLab](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098073/Blog/Content%20Images/Blog/Content%20Images/image16_aHR0cHM6_1750098073330.png)\n\n\u003Ccenter>\u003Ci>Figure 4: Side-by-side view of Entra ID and GitLab\u003C/i>\u003C/center>\u003Cbr>\n\nNow we can start copying and pasting parameters. Within the Entra ID interface, select **Edit** within the “Basic SAML Configuration” block. 
The parameter sources and destinations are identified in the following table.\n\n| Source (GitLab) | Destination (Entra ID) |\n| :---------------- | :------: | \n| Identifier        |   Identifier (Entity ID)   | \n| Assertion consumer service URL |   Reply URL (Assertion Consumer Service URL)   | \n| GitLab single sign-on URL    |  Sign on URL (Optional)   | \n\n\u003Cbr>\nOnce completed, your side-by-side view should appear similar to the following (noting the URLs are unique to your environment).\u003Cbr>\n\n![Completed basic SAML SSO configuration](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098073/Blog/Content%20Images/Blog/Content%20Images/image9_aHR0cHM6_1750098073332.png)\n\n\u003Ccenter>\u003Ci>Figure 5: Completed basic SAML SSO configuration\u003C/i>\u003C/center>\u003Cbr>\n\nClick **Save** within the Entra ID “Basic SAML Configuration” window to save your hard work thus far. Note: You may need to click on the “X” in the upper right of the “Basic SAML Configuration” window if it doesn’t close automatically. \n\nAfter this window closes, you may get a popup to test single sign-on with your application. Select **No, I’ll test later**, because we still have more work to do (there is always more work to do). \n\n## Configuring attributes and claims\n\nWithin the Entra ID user interface, look for the section for “Attributes and Claims,” and click the **Edit** pencil icon. The first thing we want to do is modify the Unique User identifier (Name ID) value, so click on that row and set the Source attribute to **user.objectid**. Additionally, the Name identifier format must be updated and set to **Persistent**.\n\n![Configuring attributes and claims](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098073/Blog/Content%20Images/Blog/Content%20Images/image14_aHR0cHM6_1750098073333.png)\n\n\u003Ccenter>\u003Ci>Figure 6: Configuring attributes and claims\u003C/i>\u003C/center>\u003Cbr>\n\nSave that claim configuration. Now we have additional claims to configure, but there are only three that we need here. So, feel free to go wild and delete those default four items under **Additional claims**, or you can edit the existing ones to match the table below. Note that these values (specifically, the Name) are case sensitive. \n\u003Cbr>\n\n| Name | Namespace | Source Attribute |\n| :---------------- | :------: | :------: | \n|emailaddress |http://schemas.microsoft.com/ws/2008/06/identity/claims | user.otheremail |\n| NameID | http://schemas.microsoft.com/ws/2008/06/identity/claims |user.objectid |\n\n\u003Cbr>\n\nThe resulting claims configuration should appear as follows. Note the use of **otheremail** for the “emailaddress” attribute. This was necessary for me as my primary email addresses within Entra ID are not the addresses used on GitLab.com. 
If you recall, when I set up my “user,\" I modified the contact information to include my gitlab.com email address as one of my “Other emails.” \n\n![Configuring the claims](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098073/Blog/Content%20Images/Blog/Content%20Images/image21_aHR0cHM6_1750098073335.png)  \n\n\u003Ccenter>\u003Ci>Figure 7: Configuring the claims\u003C/i>\u003C/center>\u003Cbr>\n\nWith your attributes configured, under the Advanced settings, enable the **Include attribute name format** setting.\n\n![Advanced claims configuration](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098073/Blog/Content%20Images/Blog/Content%20Images/image8_aHR0cHM6_1750098073336.png)\n\n\u003Ccenter>\u003Ci>Figure 8: Advanced claims configuration\u003C/i>\u003C/center>\u003Cbr>\n\nYour \"Attributes and Claims\" window should now look similar to Figure 9 below.\n\n![Configured attributes and claims](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098073/Blog/Content%20Images/Blog/Content%20Images/image18_aHR0cHM6_1750098073337.png)\n\n\u003Ccenter>\u003Ci>Figure 9: Configured attributes and claims\u003C/i>\u003C/center>\u003Cbr>\n\nIf you’re happy, or at least relatively content, with your configuration, click the “X” in the top right corner of the \"Attributes and Claims\" window to close it. \n\n## Configuring and assigning users\n\nNow that we have our application configured, we need to ensure that our users have been assigned to that application. I'll assume you’re working with a test instance that does not have the same email address as what is configured within your GitLab.com namespace. \n\nSo let’s go to the “Users and groups” within the Entra ID user interface for your configured application.\n\n![Managing application users and groups](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098073/Blog/Content%20Images/Blog/Content%20Images/image17_aHR0cHM6_1750098073338.png)\n\n\u003Ccenter>\u003Ci>Figure 10: Managing application users and groups\u003C/i>\u003C/center>\u003Cbr>\n\nSelect **Add user/group**, and under the “Users and groups” where it says “None Selected,” click that text. Now you can select the user(s) to add to your application. These are the users that will be permitted to log into GitLab, authenticating themselves through Entra ID.\n\n![User selection](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098073/Blog/Content%20Images/Blog/Content%20Images/image23_aHR0cHM6_1750098073339.png)\n\n\u003Ccenter>\u003Ci>Figure 11: User selection\u003C/i>\u003C/center>\u003Cbr>\n\nOnce selected, at the bottom of that page, click **Select**, and at the bottom of the next, select **Assign**. Now you should have a user assigned to your application.\n\n![User assigned to application](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098073/Blog/Content%20Images/Blog/Content%20Images/image12_aHR0cHM6_1750098073340.png)\n\n\u003Ccenter>\u003Ci>Figure 12: User assigned to application\u003C/i>\u003C/center>\u003Cbr>\n\nNext, we need to ensure that the GitLab.com email address for that user is configured correctly. By clicking on the user itself, we can modify or configure some additional information about that user. We can see below the User principal name, which is based on an “onmicrosoft” domain. This is not the email address I have associated with my GitLab.com account. If you recall that we set the “Email address” attribute to “otheremail,” this is where we now configure that “other” email address. 
\n\n![User properties](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098073/Blog/Content%20Images/Blog/Content%20Images/image20_aHR0cHM6_1750098073341.png)\n\n\u003Ccenter>\u003Ci>Figure 13: User properties\u003C/i>\u003C/center>\u003Cbr>\n\nClick the option to **Edit properties** for the user, and click on the **Contact Information** heading. Here we can add other emails – more specifically, the email address utilized for your GitLab.com account.  \n\n![Configuration of alternate email address](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098073/Blog/Content%20Images/Blog/Content%20Images/image15_aHR0cHM6_1750098073342.png)\n\n\u003Ccenter>\u003Ci>Figure 14: Configuration of alternate email address\u003C/i>\u003C/center>\u003Cbr>\n\nThat should complete the configuration parameters that we need in Entra ID, but wait, there’s more. \n\nWithin the GitLab side now, you will need to configure a couple of parameters. First, you might as well enable SAML for the group as that’s kind of a key piece here. GitLab offers some additional options to disable password authentication or enforce SSO to reduce the security risks within your application, but we’ll leave those unchecked for now. Similar to the table above, we’ll need a couple of things from Entra ID to configure into GitLab. Please refer to the table below. \n\u003Cbr>\n\n| Source (Entra ID) | Destination (GitLab) | \n| :---------------- | :------: | \n|Login URL |Identity provider single sign-on URL |\n| Thumbprint | Certificate fingerprint|\n\n\u003Cbr>\n\n![GitLab SAML configuration from Entra ID](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098073/Blog/Content%20Images/Blog/Content%20Images/image25_aHR0cHM6_1750098073343.png)\n\n\u003Ccenter>\u003Ci>Figure 15: GitLab SAML configuration from Entra ID\u003C/i>\u003C/center>\u003Cbr>\n\nLastly, you want to configure the default membership role for users logging in via SAML. Note that the access that you set for users here will cascade down to other groups and projects within your top-level group. Therefore, I would strongly recommend NOT setting this role to be “Owner.” Either “Guest” or “Minimal Access” would be acceptable options here, depending on the security posture of your organization. For more information about what these roles can and cannot do, refer to the GitLab documentation on [Roles and Permissions](https://docs.gitlab.com/ee/user/permissions.html#roles). Now, save your work on the GitLab interface by clicking that beautiful blue **Save changes** button.\n\nWith your GitLab settings saved, you can now test your setup. I would encourage you to do this both through the “Verify SAML Configuration” on the GitLab system as well as with the Entra ID SSO \"Test\" button.\n\n## Troubleshooting SAML\n\nIn addition to the troubleshooting steps included within [GitLab documentation](https://docs.gitlab.com/ee/user/group/saml_sso/troubleshooting.html), I wanted to include a couple other items that I personally experienced. \n\nIf you get an error stating that the SAML reference did not contain an email address, check the Claim name for your email within the “Attributes and Claims” section within your Entra ID application. With GitLab 16.7, we added support for the “2008” attribute names, and at least for the email address setting, I found the default “xmlsoap” name for the email address claim to be a disappointing failure. 
\n\nAnother common error is “SAML Name ID and email address do not match your user account.” As you may suspect, this error is caused by a mismatch of the “NameID” and “emailaddress” attributes within the Entra ID application. This could be a misconfiguration of the “Attributes and Claims,” but it could also be that the properties of your test user don’t match your configuration. One helpful method to identify exactly what is coming through the SAML exchange is to use a SAML Tracer or SAML Message Decoder plugin with your web browser. \n\n## SCIM\n\nNow that you have SAML configured to enable users to log in via your Entra ID application, let’s make sure that people are assigned to the proper group(s) upon login. This can be incredibly helpful at scale, where instead of manually identifying which groups the particular users belong to, GitLab can learn this information from your identity application, Entra ID in this case. \n\nBecause SCIM utilizes groups to identify group membership, we need to create a group within Entra ID and add the relevant user(s) to the group. For this we’ll need the main administration menu for Entra ID. \n\n![Entra ID Group configuration](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098073/Blog/Content%20Images/Blog/Content%20Images/image19_aHR0cHM6_1750098073344.png)\n\n\u003Ccenter>\u003Ci>Figure 16: Entra ID Group configuration\u003C/i>\u003C/center>\u003Cbr>\n\nWe’re going to create a new group and assign our user(s) to that group. So click **New group** and configure a new group, which only requires you to configure a “Group name.” I used the default group type of “Security.” Leave the “Membership type” as “Assigned.” From this window, we can also assign the members.\n\n![Creating a New Entra ID Group](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098073/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750098073345.png)\n\n\u003Ccenter>\u003Ci>Figure 17: Creating a New Entra ID Group\u003C/i>\u003C/center>\u003Cbr>\n\nOnce you’ve added the member(s), click **Create** in the bottom of that window. With your group created, and the user(s) assigned to the group, we can configure SCIM.\n\nImmediately below the SAML configuration section within the GitLab UI, you’ll see the “SCIM Token” area. Here you can generate a new token, and copy the endpoint URL, both of which will be useful for the next steps. Note that if you forget your SCIM token, or already have one, it can be reset. \n\n![SCIM token and endpoint within GitLab](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098073/Blog/Content%20Images/Blog/Content%20Images/image10_aHR0cHM6_1750098073345.png)\n\n\u003Ccenter>\u003Ci>Figure 18: SCIM token and endpoint within GitLab\u003C/i>\u003C/center>\u003Cbr>\n\nWith this information saved, return to your Entra ID application configuration. Within the left side menu, you’ll find the following: \n\n![Provisioning SCIM within Entra ID](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098073/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750098073346.png)\n\n\u003Ccenter>\u003Ci>Figure 19: Provisioning SCIM within Entra ID\u003C/i>\u003C/center>\u003Cbr>\n\nWithin the \"Provisioning\" section, click on **New Configuration**, which opens a new page where that token and URL from GitLab will be used. 
\n\n![New provisioning configuration](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098073/Blog/Content%20Images/Blog/Content%20Images/image22_aHR0cHM6_1750098073348.png)\n\n\u003Ccenter>\u003Ci>Figure 20: New provisioning configuration\u003C/i>\u003C/center>\u003Cbr>\n\nFeel free to test the connection to ensure that you’ve configured the parameters properly. After testing, click on the **Create** button to establish the configuration and work on our mappings and settings. You may need to click the “X” in the top right corner of the panel to return to the overview configuration.\n\nExpand the “Mappings” section, which includes two parameters: “Provision Microsoft Entra ID Groups” and “Provision Microsoft Entra ID Users.” SCIM group provisioning isn’t currently supported in GitLab, and although it doesn’t break the integration, keeping group provisioning enabled may cause benign error messages. Therefore, we want to disable “Provision Microsoft Entra ID Groups,” so click that entry and set the “Enabled” field to “No.” \n\n![Provisioning attribute mapping](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098073/Blog/Content%20Images/Blog/Content%20Images/image4_aHR0cHM6_1750098073349.png)\n\n\u003Ccenter>\u003Ci>Figure 21: Provisioning attribute mapping\u003C/i>\u003C/center>\u003Cbr>\n\nSave that configuration and select “Provision Microsoft Entra ID Users.” Validate that all three \"Target Object Actions\" are enabled, and then proceed to the “Attribute Mapping” section. Delete all existing mappings available to delete (I find this easier because attributes can’t be assigned twice), and then configure the Attribute Mappings per the following table:\n\n| customappsso Attribute (Destination) | Microsoft Entra ID Attribute (Source) | Matching Precedence | Mapping Type |\n| :---------------- | :------: | :------: | :------: | \n|externalID|objectId|1|Direct|\n|active|Switch([IsSoftDeleted], , \"False\", \"True\", \"True\", \"False\")| |Expression|\n|userName|mailNickname| |Direct|\n|name.formatted|displayName| |Direct|\n|Emails[type eq \"other\"].value|userPrincipalName| |Direct|\n\n\u003Cbr>\n\n![Editing attributes](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098073/Blog/Content%20Images/Blog/Content%20Images/image26_aHR0cHM6_1750098073349.png)\n\n\u003Ccenter>\u003Ci>Figure 22: Editing attributes\u003C/i>\u003C/center>\u003Cbr>\n\nAfter configuring all of the attribute mappings, the result should be similar to that found in Figure 23.\n\n![Completed attribute mapping configuration](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098073/Blog/Content%20Images/Blog/Content%20Images/image5_aHR0cHM6_1750098073350.png)\n\n\u003Ccenter>\u003Ci>Figure 23: Completed attribute mapping configuration\u003C/i>\u003C/center>\u003Cbr>\n\nNote the use of the “other” email within the **customappsso** attribute. This relates back to the “other” email we configured for the user back in the Entra ID user properties. In a production situation, the emails for the SSO account and the email address for the account within GitLab should match. \n\nWith your mapping complete (congratulations, Ptolemy), there are some advanced configuration settings necessary. 
Underneath the \"Attribute Mappings,\" click the box for “Show advanced options.” Once this box is checked, a link called “Edit attribute list for customappsso” is revealed.\n\n![Advanced attribute configuration](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098073/Blog/Content%20Images/Blog/Content%20Images/image6_aHR0cHM6_1750098073351.png)\n\n\u003Ccenter>\u003Ci>Figure 24: Advanced attribute configuration\u003C/i>\u003C/center>\u003Cbr>\n\nClick that link, and ensure that the Name “ID” is both “Primary Key” and “Required,” and that “externalID” is also “Required.” These attributes both refer to a unique user ID generated by Entra ID. However, although the “id” itself is required, it is not consistently provided within the API calls. Therefore, GitLab relies on the “externalID” to ensure the proper connection between the Entra ID and GitLab user accounts. \n\n![Required attribute list](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098073/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750098073351.png)\n\n\u003Ccenter>\u003Ci>Figure 25: Required attribute list\u003C/i>\u003C/center>\u003Cbr>\n\nSave these settings, and then close the “Attribute Mapping” page with the “X” in the top right of the window. Return to the \"Application Provisioning\" section and click **Start provisioning**. \n\nWithin GitLab, we need to configure the association between the group we configured within Entra ID and the level of access we want those users to have within the GitLab top-level group. Note that this association can be configured on each sub-group within GitLab for more extensive provisioning, but within GitLab, permissions flow downhill. Whatever permission you set for a user at a top-level group, or sub-group, will cascade down to all projects and groups contained therein. \n\nWithin the \"Settings\" portion of the GitLab menu, select **SAML Group Links**. Here is where you’ll configure the group name and determine what access level, or role, members of the Entra ID Group will have within this particular GitLab Group.\n\n![GitLab SAML Group link](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098073/Blog/Content%20Images/Blog/Content%20Images/image27_aHR0cHM6_1750098073352.png)\n\n\u003Ccenter>\u003Ci>Figure 26: GitLab SAML Group link\u003C/i>\u003C/center>\u003Cbr>\n\nAs shown in Figure 26, I’ve configured my membership to The Academy such that any users within the dev-security group from Entra ID  will be granted Developer access. Note that this is a slight variation of what a typical production environment would look like. In most instances, the user account within the identity provider (Entra ID, in this case) would match the user’s corporate account email (and we wouldn’t require “other” emails). When configured properly, if the user does not already have an account on GitLab, one will be created for them tied to their SSO account. \n\n![GitLab SSO tutorial - image11](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098073/Blog/Content%20Images/Blog/Content%20Images/image11_aHR0cHM6_1750098073352.png)\n\n\u003Ccenter>\u003Ci>Figure 27: SAML Group Links configured\u003C/i>\u003C/center>\u003Cbr>\n\nNow that you’ve completed the configuration, give it a try! From another browser, preferably in private mode to ignore any cookies or other yummy artifacts, paste the link for the GitLab SSO URL found in the GitLab SAML configurations. 
Now that you’ve completed the configuration, give it a try! From another browser, preferably in private mode to avoid any cookies or other yummy artifacts, paste the GitLab SSO URL found in the GitLab SAML configuration. You should be prompted to log in with your Entra ID credentials and gain the proper access to your GitLab group! \n\nCongratulations, you’ve made it! I hope you’ve learned from and appreciate the work here, and we can all rejoice in the fact that the users within the Play-Dough app can now all properly authenticate, with the right permissions, to The Academy!\n\n> Don't have a GitLab account? [Sign up for a free, 60-day trial today](https://about.gitlab.com/free-trial/devsecops/).\n\n## Read more\n- [The ultimate guide to enabling SAML and SSO on GitLab.com](https://about.gitlab.com/blog/the-ultimate-guide-to-enabling-saml/)\n- [SAML SSO for GitLab.com groups documentation](https://docs.gitlab.com/ee/user/group/saml_sso/)","security",[684,895,482,772,9],{"slug":898,"featured":6,"template":688},"how-to-gitlab-single-sign-on-with-saml-scim-and-azures-entra-id","content:en-us:blog:how-to-gitlab-single-sign-on-with-saml-scim-and-azures-entra-id.yml","How To Gitlab Single Sign On With Saml Scim And Azures Entra Id","en-us/blog/how-to-gitlab-single-sign-on-with-saml-scim-and-azures-entra-id.yml","en-us/blog/how-to-gitlab-single-sign-on-with-saml-scim-and-azures-entra-id",{"_path":904,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":905,"content":911,"config":916,"_id":918,"_type":14,"title":919,"_source":16,"_file":920,"_stem":921,"_extension":19},"/en-us/blog/how-to-scan-a-full-commit-history-to-detect-sensitive-secrets",{"title":906,"description":907,"ogTitle":906,"ogDescription":907,"noIndex":6,"ogImage":908,"ogUrl":909,"ogSiteName":673,"ogType":674,"canonicalUrls":909,"schema":910},"How to scan a full commit history to detect sensitive secrets","Use GitLab Secret Detection to scan a repository's commit history, including branches. View results within the GitLab UI with just a few lines of code added to a pipeline file.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097948/Blog/Hero%20Images/Blog/Hero%20Images/REFERENCE%20-%20display%20preview%20for%20blog%20images%20%281%29_2XDPsbkjQ3o6tcdom6IGxI_1750097948673.png","https://about.gitlab.com/blog/how-to-scan-a-full-commit-history-to-detect-sensitive-secrets","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to scan a full commit history to detect sensitive secrets\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Noah Ing\"},{\"@type\":\"Person\",\"name\":\"Jerez Solis\"}],\n        \"datePublished\": \"2025-02-06\",\n      }",{"title":906,"description":907,"authors":912,"heroImage":908,"date":913,"body":914,"category":895,"tags":915},[747,829],"2025-02-06","Secrets left exposed in outdated repositories pose a significant risk of data breaches: a secret committed long ago may still be active, leaving it open to exploitation by anyone who finds it. Secrets include access keys, API tokens, private keys, and other sensitive values. \n\nIn this article, you'll learn how to use GitLab Secret Detection to scan a repository’s full commit history, including all branches, to detect sensitive secrets. In addition, you will discover how to view the results directly within the GitLab UI without the need for any integration. All it takes is a couple of lines of code in your `.gitlab-ci.yml` pipeline file. \n\n## Scan every corner of your repository\n\nWe will use the sample repository shown in the screenshot below as an example. To keep things simple, there is only a `README.md` file present in the default branch of this repository. 
\n\n![Sample repository to scan](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097956/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750097955851.png)\n\nAt first glance, it may seem like the repository is empty and that there are probably no sensitive secrets in it. But what we are looking at is only the state of the default branch, which is the main branch in this example. There could be feature branches in this repository created weeks, months, or years ago with sensitive secrets. It is also possible that a file with a secret was accidentally pushed to the repo and then deleted right after. Deleting the file, however, does not remove it from the git history: the commit that introduced it is still there.\n\nWe are going to enable the GitLab Secret Detection scanner and set the `SECRET_DETECTION_HISTORIC_SCAN` variable to **true** so that the content of all branches in the repository is scanned.\n\n![Enable GitLab Secret Detection variable to true](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097956/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750097955853.png)\n\n```\ninclude:\n  - template: Jobs/Secret-Detection.gitlab-ci.yml\nsecret_detection:\n  variables:\n    SECRET_DETECTION_HISTORIC_SCAN: \"true\"\n```\n\nBy setting the `SECRET_DETECTION_HISTORIC_SCAN` variable to **true**, GitLab Secret Detection looks into every branch and commit of your repository. It ensures that no sensitive information — whether from a feature branch or an old commit — is left unchecked.\n\n## Results of the scan\n\nTwo sensitive secrets were identified in the repository. One is a password in a `.env` file that was deleted from the repository, but the commit containing it was not removed from the git history. The other is an AWS Access Token found in a feature branch. These exposed secrets could compromise the organization’s security. \n\n![AWS Access Token screen](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097956/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750097955855.png)\n\nYou can click on the AWS Access Token result to see more details, including the file location. You can also create a GitLab issue to triage the vulnerability with one click. If you’re using the Jira integration, you can create a Jira ticket directly from the vulnerability page as well.\n\n## Why scanning for secrets matters\n\nAnyone with access to the repository can misuse the secret to gain unauthorized access to private resources and sensitive data. \n\nIn addition to scanning a repository’s full commit history across all branches, GitLab Secret Detection also helps you take a multilayered approach to detecting secrets:\n\n* [Secret push protection](https://docs.gitlab.com/ee/user/application_security/secret_detection/secret_push_protection/index.html) - scans commits for secrets during a push and blocks it if secrets are detected, unless skipped, reducing the risk of leaks.  \n* [Pipeline secret detection](https://docs.gitlab.com/ee/user/application_security/secret_detection/pipeline/index.html) - scans files after they’ve been committed and pushed to a GitLab repository.\n* [Client-side secret detection](https://docs.gitlab.com/ee/user/application_security/secret_detection/client/index.html) - scans comments and descriptions in issues and merge requests for secrets before they're saved to GitLab.  \n* [Automatic response to leaked secrets](https://docs.gitlab.com/ee/user/application_security/secret_detection/automatic_response.html) - automatically revokes certain types of leaked secrets and notifies the partner that issued the secret. 
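\n\nA note on the “unless skipped” wording above: secret push protection honors a Git push option, so a push that you have confirmed is a false positive can bypass the check a single time (based on GitLab’s documented push options; use sparingly):\n\n```bash\n# Skip secret push protection for this single push only\ngit push -o secret_push_protection.skip_all\n```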
\n\nYou can adjust pipeline secret detection to suit your needs by modifying, extending, or replacing the default ruleset. For instance, you can define [custom rules](https://docs.gitlab.com/ee/user/application_security/secret_detection/pipeline/index.html#customize-analyzer-rulesets) using regex patterns to detect sensitive data like credit card numbers, phone numbers, or other information specific to your organization.\n\n## Try GitLab Secret Detection\n\n1. [Enable](https://docs.gitlab.com/ee/user/application_security/secret_detection/pipeline/#enable-the-analyzer) Secret Detection in your GitLab pipeline.  \n2. Set `SECRET_DETECTION_HISTORIC_SCAN: true`.  \n3. Push and trigger a pipeline to scan all branches and commits.\n\nGitLab makes securing your code simple and comprehensive. Don’t let an old branch or commit compromise your security — give historical scans a try today!\n\n> #### [Sign up for a free 60-day trial of GitLab Ultimate](https://about.gitlab.com/free-trial/) to get started with security scanners like Secret Detection.",[9,684,482,729],{"slug":917,"featured":6,"template":688},"how-to-scan-a-full-commit-history-to-detect-sensitive-secrets","content:en-us:blog:how-to-scan-a-full-commit-history-to-detect-sensitive-secrets.yml","How To Scan A Full Commit History To Detect Sensitive Secrets","en-us/blog/how-to-scan-a-full-commit-history-to-detect-sensitive-secrets.yml","en-us/blog/how-to-scan-a-full-commit-history-to-detect-sensitive-secrets",{"_path":923,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":924,"content":930,"config":937,"_id":939,"_type":14,"title":940,"_source":16,"_file":941,"_stem":942,"_extension":19},"/en-us/blog/how-we-reduced-mr-review-time-with-value-stream-management",{"title":925,"description":926,"ogTitle":925,"ogDescription":926,"noIndex":6,"ogImage":927,"ogUrl":928,"ogSiteName":673,"ogType":674,"canonicalUrls":928,"schema":929},"How we reduced MR review time with Value Stream Management ","The GitLab engineering team leverages VSM to pinpoint bottlenecks in the merge request review process and streamline software delivery. See how we do it and what we've learned.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097876/Blog/Hero%20Images/Blog/Hero%20Images/REFERENCE%20-%20display%20preview%20for%20blog%20images%20%282%29_2pKf8RsKzAaThmQfqHIaa7_1750097875817.png","https://about.gitlab.com/blog/how-we-reduced-mr-review-time-with-value-stream-management","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How we reduced MR review time with Value Stream Management \",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Haim Snir\"}],\n        \"datePublished\": \"2025-02-20\",\n      }",{"title":925,"description":926,"authors":931,"heroImage":927,"date":933,"body":934,"category":681,"tags":935},[932],"Haim Snir","2025-02-20","At GitLab, we're passionate about using our own products internally, a.k.a. dogfooding. Dogfooding has driven significant improvements in how quickly we deliver software to our customers. 
This article spotlights a specific use case where [GitLab Value Stream Management (VSM)](https://about.gitlab.com/solutions/value-stream-management/) has driven significant improvements for our engineering team. You'll learn how VSM helped us tackle two critical challenges: measuring the journey from idea conception to merge request completion, and streamlining our deployment workflows.\n\n## The Challenge: Identifying bottlenecks in MR reviews\n\nDespite having well-defined workflows, one team noticed that MRs were taking longer than expected to be reviewed and merged. The challenge wasn’t just about the delays themselves, but about understanding *where* in the review process these delays were happening and *why*.\n\nOur team’s goal was clear:\n\n- Identify where time was being spent from the initial idea to the final merge of an MR.  \n- Pinpoint specific bottlenecks in the review process.  \n- Understand how MR size, complexity, or documentation quality affect review time.\n\n## The Approach: Measuring MR review time in GitLab Value Stream Analytics\n\nValue Stream Analytics (VSA) enables organizations to map their entire workflow from idea to delivery, distinguishing between value-adding activities (VA) and non-value-adding activities (NVA) in the process flow. By calculating the ratio of value-added time to total lead time, the team can identify wasteful activities that result in delays in MR reviews.\n\nTo obtain the necessary metrics, the team customized GitLab VSA to gain better visibility into our MR review process.\n\n### 1. Setting up a custom stage for MR review\n\nThe team added a [new custom stage](https://docs.gitlab.com/ee/user/group/value_stream_analytics/#value-stream-stage-events) in VSA called **Review Time to Merge** to specifically track the time from when a reviewer was first assigned to when the MR was merged.\n\n* Start event: MR first reviewer assigned  \n* End event: MR merged\n\nBy defining this stage, VSA began measuring the duration of the MR review process, giving us precise data on where time was being spent.\n\n![Defining stage of VSA](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097884/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750097883929.png)\n\n### 2. Using the Total Time Chart for clarity\n\nWith the custom stage in place, the team used the [**Total Time Chart** on the VSA Overview page](https://about.gitlab.com/blog/value-stream-total-time-chart/) (**Analyze > Value Stream**) to visualize how much time was spent during the new MR Review stage. By comparing the values represented by each area on the chart, the team could quickly identify how this stage contributed to the total software delivery lifecycle (SDLC) time.\n\n![total time chart for VSA](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097884/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750097883930.png)\n\n### 3. Drilling down for deeper insights\n\nTo investigate specific delays, the team used the **Stage Navigation Bar** to dive deeper into the MR Review stage. This view allowed them to:\n\n- Sort MRs by review time: The stage table showed all related MRs, sorted by review duration, making it easy to detect slow MRs.  
\n- Analyze individual MRs: For each MR, the team could examine factors such as reviewer assignment delays, multiple rounds of feedback, idle time after approval, and MR size/complexity.\n\n## The outcome: Actionable insights and improvements\n\nBy customizing VSA to track [MR review time](https://docs.gitlab.com/user/project/merge_requests/reviews/), the team uncovered several key insights:\n\n- **Delays in reviewer assignment:** Some MRs experienced delays because reviewers were assigned late, or reviewers had too many MRs in their queue.  \n- **Slow review start times:** Even after assignment, certain MRs sat idle before reviews began, often due to context switching or competing priorities.  \n- **Multiple feedback loops:** Larger MRs often required multiple rounds of feedback, which extended review time significantly.  \n- **Idle time post-approval:** Some MRs were approved but not merged promptly, often due to deployment coordination issues.\n\nFor the engineering manager on the team, VSA proved to be transformational in managing their team's workflow: *“I've used the VSA to justify where we were spending time in MR completion. We have VSA customized to our needs, and it's been very beneficial to our investigations for opportunities for improvements.”* \n\nAlso, from this dogfooding experience, we’re now developing a key enhancement to improve visibility into the review process. We're adding a new event to VSA — [Merge request last approved at](https://gitlab.com/gitlab-org/gitlab/-/issues/503754) — which creates a stage that breaks down MR review steps even further for granular visibility.\n\n## The power of data-driven decisions\n\nBy leveraging GitLab’s VSA, we didn’t just identify bottlenecks – we gained actionable insights that led to measurable improvements in MR review time and overall developer productivity. We optimized merge request review cycles and increased developer throughput, validating our commitment to continuous improvement through measurement.\n\n> Want to learn more about how VSA can help your team? [Start a free, 60-day trial of GitLab Ultimate](https://about.gitlab.com/free-trial/), customize your value streams, and see how you can make improvements throughout the SDLC for your teams. 
Then, make sure to [share your feedback and experiences in this issue](https://gitlab.com/gitlab-org/gitlab/-/issues/520962).\n\n## Read more\n\n- [Optimize value stream efficiency to do more with less, faster](https://about.gitlab.com/the-source/platform/optimize-value-stream-efficiency-to-do-more-with-less-faster/)\n- [New Scheduled Reports Generation tool simplifies value stream management](https://about.gitlab.com/blog/new-scheduled-reports-generation-tool-simplifies-value-stream-management/)\n- [Value stream analytics documentation](https://docs.gitlab.com/user/group/value_stream_analytics/)\n- [Value stream management: Total Time Chart simplifies top-down optimization flow](https://about.gitlab.com/blog/value-stream-total-time-chart/)\n",[685,729,482,936,9],"workflow",{"slug":938,"featured":6,"template":688},"how-we-reduced-mr-review-time-with-value-stream-management","content:en-us:blog:how-we-reduced-mr-review-time-with-value-stream-management.yml","How We Reduced Mr Review Time With Value Stream Management","en-us/blog/how-we-reduced-mr-review-time-with-value-stream-management.yml","en-us/blog/how-we-reduced-mr-review-time-with-value-stream-management",{"_path":944,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":945,"content":951,"config":956,"_id":958,"_type":14,"title":959,"_source":16,"_file":960,"_stem":961,"_extension":19},"/en-us/blog/provision-group-runners-with-google-cloud-platform-and-gitlab-ci",{"title":946,"description":947,"ogTitle":946,"ogDescription":947,"noIndex":6,"ogImage":948,"ogUrl":949,"ogSiteName":673,"ogType":674,"canonicalUrls":949,"schema":950},"Provision group runners with Google Cloud Platform and GitLab CI","This tutorial will teach you how to set up a new group runner on GitLab.com using Google Cloud Platform in less than 10 minutes.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098300/Blog/Hero%20Images/Blog/Hero%20Images/AdobeStock_623844718_4E5Fx1Q0DHikigzCsQWhOG_1750098300048.jpg","https://about.gitlab.com/blog/provision-group-runners-with-google-cloud-platform-and-gitlab-ci","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Provision group runners with Google Cloud Platform and GitLab CI\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sarah Matthies\"},{\"@type\":\"Person\",\"name\":\"Noah Ing\"}],\n        \"datePublished\": \"2024-11-19\",\n      }",{"title":946,"description":947,"authors":952,"heroImage":948,"date":953,"body":954,"category":681,"tags":955},[746,747],"2024-11-19","Are you interested in hosting your own servers to run your GitLab CI/CD pipelines but don’t know where to begin? Setting up a GitLab Runner to run your pipelines on your own infrastructure can seem like a daunting task, as it requires infrastructure knowledge and the know-how to maintain that infrastructure. Typically, this process involves provisioning infrastructure, installing dependencies, and testing that everything works with your GitLab instance.\n\nThis article highlights how easy it is to spin up a GitLab Runner of your own utilizing GitLab’s Google Cloud integration. 
Follow this tutorial and you will have a new group runner set up on GitLab.com using Google Cloud Platform in less than 10 minutes!\n\nYou will learn how to:\n\n- Create a new group runner.\n- Configure the new group runner’s tags and description.\n- Register the new group runner by adding the required configuration.\n- Provision the GitLab Runner utilizing the `gcloud` CLI and Terraform.\n- Have your GitLab Runner pick up its first GitLab CI job.\n\n## Prerequisites\n- A terminal with Bash installed\n- Owner access on a Google Cloud Platform project\n- Terraform (or OpenTofu) [Version 1.5](https://releases.hashicorp.com/terraform/1.5.7/) or greater\n- [gcloud CLI](https://cloud.google.com/sdk/docs/install)\n- 10 minutes\n\n## Tutorial\n1. Create a new group runner under __Build > Runners > New Group Runner__.\n\n__Note:__ Make sure you navigate to the group level first.\n\n![GitLab Runner setup screen](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098317/Blog/Content%20Images/Blog/Content%20Images/image7_aHR0cHM6_1750098317126.png)\n\n2. Configure the new group runner's tags, description, and any additional configurations.\n\n![New Group Runner setup](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098317/Blog/Content%20Images/Blog/Content%20Images/image4_aHR0cHM6_1750098317127.png)\n\n3. Select __Google Cloud__.\n\n![Select Google Cloud screen](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098317/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750098317129.png)\n\n4. Copy your project ID from Google Cloud Platform.\n\n![Copy project ID from GCP screen](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098317/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750098317131.png)\n\n5. Fill out your Google Cloud project ID and choose the region, zone, and type of machine you want to use.\n\n![Screen to fill out Google Cloud information](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098317/Blog/Content%20Images/Blog/Content%20Images/image8_aHR0cHM6_1750098317132.png)\n\n6\\. Once this information is filled out, click **Setup instructions**, then run the bash script provided in Step 1 of those instructions.\n\n**Note:** Here, the script was saved to a file called `setup.sh` for ease of use. You may also paste it directly into your terminal if you are confident debugging there.\n\n![Setup instructions screen](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098317/Blog/Content%20Images/Blog/Content%20Images/image6_aHR0cHM6_1750098317134.png)\n\n![Script for GitLab Runner](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098317/Blog/Content%20Images/Blog/Content%20Images/image10_aHR0cHM6_1750098317135.png)\n\n7\\. Create a `main.tf` file and follow the instructions in GitLab.\n\n**Note:** If you want to use OpenTofu instead of Terraform, you can still copy the code; you only have to adjust the Terraform commands for applying the configuration. \n\n![Install and register GitLab Runner screen](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098317/Blog/Content%20Images/Blog/Content%20Images/image9_aHR0cHM6_1750098317136.png)\n\nOnce successfully provisioned, you should see the following:\n\n![GitLab Runner code](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098317/Blog/Content%20Images/Blog/Content%20Images/image5_aHR0cHM6_1750098317137.png)\n\n
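If you would like to confirm the new VM from the command line as well, you can list the Compute Engine instances in your project (an optional sanity check; the instance name will depend on the configuration you chose):\n\n```bash\n# The newly created runner manager should appear in this list\ngcloud compute instances list --project \u003Cyour-project-id>\n```\n\n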
8\\. If you close the instructions and click the **View runners** button, you will now see a newly provisioned runner with \"Never contacted\" as its status.\n\n![Newly provisioned runner on screen](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098317/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750098317139.png)\n\n9\\. In any project, add the following `.gitlab-ci.yml`.\n\n```\nstages:\n  - greet\n\nhello_job:\n  stage: greet\n  tags:\n    - gcp-runner\n  script:\n    - echo \"hello\"\n```\n\nVoila! You have set up your first GitLab Runner utilizing Google Cloud Platform.\n\n## Next steps\n\nNow that you have provisioned your very own GitLab Runner, consider optimizing it for your specific use case. Some things to consider with your runner moving forward:\n\n- Is the runner I provisioned the right size? Does it need additional resources for my use case? \n- Does the GitLab Runner contain all the dependencies my builds need?  \n- How can I manage the GitLab Runner as infrastructure as code?\n\n> Make sure to bookmark the [Provisioning runners in Google Cloud documentation](https://docs.gitlab.com/ee/ci/runners/provision_runners_google_cloud.html) for easy reference.\n",[684,482,708,109,9,751,231],{"slug":957,"featured":6,"template":688},"provision-group-runners-with-google-cloud-platform-and-gitlab-ci","content:en-us:blog:provision-group-runners-with-google-cloud-platform-and-gitlab-ci.yml","Provision Group Runners With Google Cloud Platform And Gitlab Ci","en-us/blog/provision-group-runners-with-google-cloud-platform-and-gitlab-ci.yml","en-us/blog/provision-group-runners-with-google-cloud-platform-and-gitlab-ci",{"_path":963,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":964,"content":970,"config":976,"_id":978,"_type":14,"title":979,"_source":16,"_file":980,"_stem":981,"_extension":19},"/en-us/blog/quick-setup-of-a-gke-cluster-with-argocd-pre-installed-using-terraform",{"title":965,"description":966,"ogTitle":965,"ogDescription":966,"noIndex":6,"ogImage":967,"ogUrl":968,"ogSiteName":673,"ogType":674,"canonicalUrls":968,"schema":969},"Quick setup of a GKE Cluster with ArgoCD pre-installed using Terraform","Use this tutorial as a great starting point to manage your cluster entirely through GitOps.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749665989/Blog/Hero%20Images/AdobeStock_618473457.jpg","https://about.gitlab.com/blog/quick-setup-of-a-gke-cluster-with-argocd-pre-installed-using-terraform","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Quick setup of a GKE Cluster with ArgoCD pre-installed using Terraform\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Noah Ing\"},{\"@type\":\"Person\",\"name\":\"Siddharth Mathur\"}],\n        \"datePublished\": \"2024-01-31\",\n      }",{"title":965,"description":966,"authors":971,"heroImage":967,"date":973,"body":974,"category":681,"tags":975},[747,972],"Siddharth Mathur","2024-01-31","This tutorial will walk you through setting up a Google Kubernetes Engine (GKE) Cluster with ArgoCD pre-installed, utilizing Terraform, in less than 10 minutes. 
This will be a great starting point to manage your cluster entirely through GitOps.\n\n### Prerequisites\n- GCP account with permissions to provision a GKE Cluster\n- Kubectl client v1.23.9\n- Kubectl server v1.23.16-gke.1400\n- Working knowledge of GKE\n- Basic knowledge of ArgoCD\n\n#### An overview of this tutorial is as follows:\n- Set up the GitLab Terraform GKE ArgoCD Template \n- Connect to your GKE Cluster\n- Grab the ArgoCD Initial Admin Secret\n- Log into ArgoCD \n- Enjoy your Kubernetes Cluster with ArgoCD!\n\n#### Set up the GitLab Terraform GKE ArgoCD template\n\nStart by importing the example project by URL - [https://gitlab.com/projects/new#import_project](https://gitlab.com/projects/new#import_project).\n\nTo import the project:\n1. In GitLab, on the top bar, select **Main menu > Projects > View all projects**.\n2. On the right of the page, select **New project**.\n3. Select **Import project**.\n4. Select **Repository by URL**.\n5. For the Git repository URL:\n    - [GitLab Terraform GKE ArgoCD](https://gitlab.com/demos/infrastructure/gitlab-terraform-gke-argocd)\n6. Complete the fields and select **Create project**.\n\n#### Add in your cloud credentials to CI/CD variables\n\n1. To authenticate GCP with GitLab, create a GCP service account with the following roles: **Compute Network Viewer, Kubernetes Engine Admin, Service Account User, and Service Account Admin**. Both the User and Admin service account roles are necessary: the User role impersonates the default service account when creating the node pool, and the Admin role creates a service account in the kube-system namespace.\n2. **Download the JSON file** with the service account key you created in the previous step.\n3. On your computer, encode the JSON file to base64 (replace /path/to/sa-key.json with the path to your key):\n\n```\nbase64 -i /path/to/sa-key.json\n```\n\n4. Use the output of this command as the **BASE64_GOOGLE_CREDENTIALS** environment variable in the next step.\n5. On the left sidebar, select **Settings > CI/CD** and expand **Variables**.\n6. Set the variable **BASE64_GOOGLE_CREDENTIALS** to the base64-encoded JSON you just created.\n7. Set the variable **TF_VAR_gcp_project** to your GCP project’s name.\n\n![simpleargocd - image 1](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749683298/Blog/Content%20Images/simpleargocd_-_image_1.png)\n\n#### Run GitLab CI to deploy your Kubernetes cluster with ArgoCD installed.\n\n![simpleargocd - image 2](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749683298/Blog/Content%20Images/simpleargocd_-_image_2.png)\n\n#### Connect to your GKE Cluster through your terminal using the following bash command.\n\n```bash\ngcloud container clusters get-credentials gitlab-terraform-gke-argocd --region us-central1 --project \u003Cproject-name>\n```\n\n![simpleargocd-image3](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749683298/Blog/Content%20Images/simpleargocd-image3.png)\n\n#### Expose the Initial Admin Secret through your terminal using the following bash command. Make sure you save this password for later.\n\n```bash\nkubectl -n argocd get secret argocd-initial-admin-secret -o jsonpath=\"{.data.password}\" | base64 -d\n```\n\n
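If you’d rather not copy the password by hand, an optional variation is to capture it in a shell variable for the current session (the same command, just stored):\n\n```bash\n# Stash the initial admin password so you can echo or reuse it later\nARGOCD_PWD=$(kubectl -n argocd get secret argocd-initial-admin-secret -o jsonpath=\"{.data.password}\" | base64 -d)\n```\n\n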
#### Port forward ArgoCD to your localhost 8080 through your terminal using the following bash command, then open localhost:8080 in your browser.\n\n```bash\nkubectl port-forward svc/argocd-server -n argocd 8080:443\n```\n\n#### Enter `admin` as the username and the `Initial Admin Secret` as the password on the login page.\n\n![simpleargocd - image 4](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749683298/Blog/Content%20Images/simpleargocd_-_image_4.png)\n\n#### Voila! You've bootstrapped your GKE cluster with ArgoCD. Enjoy your GitOps!\n\n![simpleargocd - image 5](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749683298/Blog/Content%20Images/simpleargocd_-_image_5.png)\n\n### Next steps\nWe recommend reviewing [setting up Review Ops with ArgoCD](https://about.gitlab.com/blog/how-to-provision-reviewops/)! \n\n### References\n- [GitLab Learn Labs - Infrastructure Webinar](https://gitlab.com/gitlab-learn-labs/webinars/infrastructure/gitlab-terraform-gke-argocd)\n- [Getting started with ArgoCD](https://argo-cd.readthedocs.io/en/release-2.0/getting_started/)\n\n### Related posts\n- [Simple Kubernetes management with GitLab](https://about.gitlab.com/blog/simple-kubernetes-management-with-gitlab/)\n- [How to provision ReviewOps](https://about.gitlab.com/blog/how-to-provision-reviewops/)\n- [The ultimate guide to GitOps with GitLab](https://about.gitlab.com/blog/the-ultimate-guide-to-gitops-with-gitlab/)\n",[537,855,109,772,9],{"slug":977,"featured":6,"template":688},"quick-setup-of-a-gke-cluster-with-argocd-pre-installed-using-terraform","content:en-us:blog:quick-setup-of-a-gke-cluster-with-argocd-pre-installed-using-terraform.yml","Quick Setup Of A Gke Cluster With Argocd Pre Installed Using Terraform","en-us/blog/quick-setup-of-a-gke-cluster-with-argocd-pre-installed-using-terraform.yml","en-us/blog/quick-setup-of-a-gke-cluster-with-argocd-pre-installed-using-terraform",{"_path":983,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":984,"content":990,"config":998,"_id":1000,"_type":14,"title":1001,"_source":16,"_file":1002,"_stem":1003,"_extension":19},"/en-us/blog/second-law-of-complexity-dynamics",{"title":985,"description":986,"ogTitle":985,"ogDescription":986,"noIndex":6,"ogImage":987,"ogUrl":988,"ogSiteName":673,"ogType":674,"canonicalUrls":988,"schema":989},"How pursuit of simplicity complicates container-based CI","Simplicity always has a certain player in mind - learn how to avoid antipatterns by ensuring simplicity themes do not compromise your productivity by over-focusing on machine efficiencies.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663397/Blog/Hero%20Images/logoforblogpost.jpg","https://about.gitlab.com/blog/second-law-of-complexity-dynamics","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"When the pursuit of simplicity creates complexity in container-based CI pipelines\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Darwin Sanoy\"}],\n        \"datePublished\": \"2022-05-24\",\n      }",{"title":991,"description":986,"authors":992,"heroImage":987,"date":993,"body":994,"category":681,"tags":995},"When the pursuit of simplicity creates complexity in container-based CI pipelines",[703],"2022-05-24","\n\nIn a GitLab book club, I recently read \"[The Laws of Simplicity](http://lawsofsimplicity.com/),\" a great book on a topic that has deeply fascinated me for many years. 
The book contains an acronym that expresses simplicity generation approaches: SHE, which stands for \"shrink, hide, embody.\" These three approaches to simplicity generation all share a common attribute: They create illusions - not eliminations.\n\nI've seen this illusion repeat across many, many realms of pursuit for many years. Even in human language, vocabulary development, jargon, and acronyms all simply encapsulate worlds of complexity that still exist, but can be more easily referenced in a compact form that performs SHE on the world of concepts.\n\nEvery illusion has a boundary or curtain: in front of the curtain, the complexity can be dealt with by following simple rules, but behind the curtain, the complexity must be managed by a stage manager. \n\nFor instance, when the magic show creates the illusion of sawing people in half, what appears to be a simple box is in fact an exceedingly elaborate contraption. Not only that, but the manufacturing processes for an actual simple box and the sawing box are markedly different in terms of complexity. That manufactured complexity is essentially the tradeoff for what would otherwise be the real-world complexity of actually sawing people in half and having them heal and stand up unharmed immediately afterward.\n\nTo bring this into the technical skills realm, consider that when you leverage a third-party component or API to add functionality, you only need to know the parameters to obtain the desired result. The people maintaining that component or API must know the quantum mechanics detail level of how to perform that work in a reliable and complete way.\n\nDocker containers are a mechanism for embodying complexity, and are used in scaled applications and within container-based CI. When a [CI/CD](/topics/ci-cd/) automation engineer uses container-based CI, it is possible to make things more complex and more expensive when attempting to do exactly the opposite.\n\nAt its core, this post is concerned with how it can happen that pursuing a simpler world through containers can turn into an antipattern - a reversal of desired outcomes - many times without us noticing that the reversal is affecting our productivity. The prison of a paradigm is secure indeed.\n\n### The Second Law of Complexity Dynamics\n\nOver the years I have come to believe that the pursuit of reducing complexity has similar characteristics to [The Second Law of Thermodynamics](https://www.grc.nasa.gov/www/k-12/airplane/thermo2.html). A conversion between mass and energy results in the same net amount of mass and energy; only their ratio and form have changed. In what I will coin \"The Second Law of Complexity Dynamics,\" complexity is similarly \"conserved\"; it is just reformed.\n\nIf complexity is not eliminated by simplifying efforts, we reduce its impact in a given realm by changing the ratio of complexity and simplicity on each side of one or more curtains. But alas, complexity did not die; it just hid, and is now someone else's management challenge. It is important not to think of this as cheating. There is no question that hiding complexity carries the potential for massive efficiency gains when the world behind the hiding mechanisms becomes the realm of specialty skills and specialists. When it truly externalizes the complexity management for one party, the world becomes simpler for that party.\n\nHowever, the devil is in the details. 
If the hypothesis of \"no net elimination of complexity\" is correct, it is then important where the complexity migrates to. If it migrates to another part of the same process that must also be managed by the same people, then it may not result in a net gain of efficiency. If it migrates out of a previously embodied realm, then, in the pursuit of simplicity, we can actually reduce our overall efficiency when the process is considered as a whole.\n\n### Container-based CI pipelines as a useful case in point\n\nI see the potential for efficiency reversals crop up in my daily work time and again, and an interesting place where I've seen it lately is in the tradeoff of linking together hyper-specialized modules of code in containers for CI versus leveraging more generalized modules.\n\nIn creating container-based pipelines, I experience a potential efficiency reversal that I have to consciously manage.\n\nContainers make a simplicity tradeoff by design. They create a full runtime environment for a single, narrow purpose, but in doing so they strip back the container internals so far that general compute tasks are difficult inside them. If you step behind their \"complexity embodying\" curtain into the container, their simplistic environment can require more complex code to operate within.\n\nIn GitLab CI pipelines that utilize containers, all the scripts of jobs run inside the containers that are specified as their runtime environment. When one selects a specialized container - such as the alpine git container or the skopeo image management container - the code is subject to the limitations of the shell that container employs (if it has one at all).\n\nContainers were devised to be hyper-specialized, purpose-specific runtimes that assure they can always run and run quickly for scaled applications. However, for many containers this means no shell or a very stripped back shell like busybox sh. It frequently also means not including the package manager for the underlying Linux distribution.\n\nTime and again, I've found myself degrading the implementation of my shell code in key ways that make it more complex, so that it can run under these stripped back shells. In these cases, I do not benefit from the complexity hiding of newer versions of advanced shells like Bash v5. One such area is advanced Bash shell expansions, which embody a huge world of complex parsing and remove the need for a bunch of extraneous utilities. Another is the advanced comparison logic of if and case statements, which processes regular expressions without external utilities and performs many other abstracted comparisons. There are many other areas of the language where this comes into play, but these two stand out.\n\n![Container pipeline tradeoffs](https://about.gitlab.com/images/blogimages/second-law-of-complexity-dynamics-container-pipeline-tradeoffs.png)\n\nSo by having a simpler shell like busybox sh, the complexity that advanced shell features would otherwise hide becomes *unhidden* and joins my side of the curtain. Now I have to manage it in my code. But then, guess what? No package manager means the inability to install other Linux utilities and language extensions that I could also employ to push that same complexity back out of my space. And, of course, it means installing Bash v5 would be difficult as well.\n\nSo the simplicity proposition of a tightly optimized purpose-specific container can reverse the purported efficiency gains in the very important realm of the code I have to write. 
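\n\nTo make the degradation concrete, here is a small sketch of the kind of rewrite I mean (my own example values, not from any particular pipeline): a version check that Bash's built-in `[[ =~ ]]` regex matching handles in one line requires shelling out to an external utility under busybox sh.\n\n```bash\n# Bash 5: regex comparison is built into [[ ]]; no external process needed\nif [[ \"$VERSION\" =~ ^[0-9]+\\.[0-9]+\\.[0-9]+$ ]]; then echo \"valid semver\"; fi\n\n# busybox sh: no [[ ]], so the same check leans on an external grep invocation\nif echo \"$VERSION\" | grep -Eq '^[0-9]+\\.[0-9]+\\.[0-9]+$'; then echo \"valid semver\"; fi\n```\n\n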
The stripped-back container also means I frequently have to break up my code into multiple jobs to utilize the specializations of these containers in a sequence, or to transport the results of a specialized container into a fuller coding environment. This increases the complexity of the pipeline, as I now have to pass artifacts and variable data from one job to another with a host of additional YAML directives, and sometimes deploy infrastructure (e.g., [Runner caching](https://docs.gitlab.com/ee/ci/caching/#:~:text=For%20runners%20to%20work%20with,GitLab.com%20behave%20this%20way)).\n\nIn the case of CI using containers, when the simplicity tradeoffs move complexity from things I do not maintain, such as base containers, operating system packages, and full shell environments, into things I do maintain, such as CI YAML and shell script code, then I am also inheriting long-term complexity maintenance. In the cloud, we know this as undifferentiated heavy lifting.\n\nInterestingly, the proliferation of specialized containers can also require more machine resources and can lengthen processing time, as containers are retrieved from registries and loaded, and artifacts and source code are copied in and out of each job-based container.\n\n### Simplicity target: Efficiency\n\nIt's easy to lose sight of the amount of human effort and ingenuity being applied to knowing and managing the coding structure, rather than being applied to solving the real automation problems of the CI pipeline. The net complexity of the pipeline can also mean it is hard to maintain an understanding of it even if you are working in it every day - and for newcomers onboarding, it can be many weeks before they fully understand how the system works.\n\nOf course, I can create my own containers for CI pipelines, but now I've added the complexity of container development and continuous updates of the same in order for my pipeline code to be operational and stay healthy. I am still behind the curtain for that container. For teams whose software is not itself containerized, the prospect of learning to build containers just for CI can create a lot of understandable friction to adopting a container-based CI development process. This friction may be unnecessary if we make a key heuristic adaptation.\n\n### Walking the tightwire above the curtain\n\nSo how do I manage the tensions of these multiple worlds of complexity when it comes to container-based pipelines to try to avoid efficiency reversals in the net complexity of the pipeline?\n\nIt is simple. I will describe the method and then the key misapplied heuristic and how to adjust it.\n\n1. I hold that the primary benefits of container-based CI are a) dependency isolation by job (so that you don’t have a massive and brittle CI build machine specification to handle all possible build requirements), and b) clean CI build agent state by obtaining a clean container copy for each job. These benefits do not imply having to abide by microservices container resource planning, and doing so is what creates an antipattern in my productivity.\n\n2. I frequently use a Bash 5 container (version pegged if need be) where all the complexity that advanced shell capabilities embody for me stays behind the curtain.\n\n3. Instead of running a hyper-minimalized container for a given utility, I do a runtime install of that utility (gasp!) in a container that has my rich shell. I utilize version pegging during the install if I feel version safety is paramount on the utility. 
Alternatively, if a very desirable runtime of some type is difficult to set up and does not have a package, I look for a container that has a package manager that matches a packaged version of the runtime and also allows me to install my advanced scripting language if needed.\n\n4. If, and only if, the net time of the needed runtime installs exceeds the net pipeline time to load a string of specialized containers (with artifact handling), plus my time to develop and manage a pipeline dependency in the form of a custom container, do I consider creating a pipeline-specific container.\n\n5. Through this process a balancing principle also emerges. Since I have been doing runtime installs as a development practice, I have actually already MVPed what a pipeline-specific container would need to have installed. I can literally copy the installation lines into a Dockerfile if I wish. I can also notice if I have commonality across multiple pipelines where it makes sense to create a multi-pipeline utility container.\n\nIn a recent project, following these principles caused me to avoid the skopeo container and instead install skopeo on the Bash 5 container using a package manager.\n\nIf your team is big into Python or PowerShell as your CI language, it would make sense to start with recent releases of those containers. The point is not advanced Bash, but an advanced version of your general CI scripting language that prevents you from creating workarounds in your code for problems that are well-solved in publicly available runtimes.\n\nKeep in mind that this adjustment is very, very focused on containers **in CI pipelines**, which, by nature, reflect general compute processing requirements where many vastly different operations are required in a pipeline. I am not advocating this approach for true microservices applications where, by design, a given service has a very defined purpose and characteristics and, at scale, massively benefits from the machine efficiency of hyper-minimalized, purpose-specific granularity.\n\n### Misapplied heuristics\n\nFrequently, when a pattern has an inflection point at which it becomes an antipattern, it is due to misapplying the heuristics of the wrong realm. In this case, I believe that normal containerization patterns for microservices apps are well founded, but they apply narrowly to \"engineered hyper-specialized compute\" of a granule we call \"a microservice\" (note the word \"micro\" applies to the scope of compute activities). Importantly, they apply because the process itself is designed as hyper-specialized around a very specific task. The container contents (included dependencies), immutability principle (no runtime change), and the runtime compute resources can be managed exceedingly minimally because of the small and highly specific scope of computing activities that occur within the process.\n\nThis is essentially the embodiment of the 12 Factor App principle called “[VIII. Concurrency](https://12factor.net/concurrency),” which asserts that scaling should be horizontal scaling of the same minimalized process, not vertical scaling of compute resources inside a given process. If the system experiences 10x work for a particular activity, we create 10 processes; we do not request 10x memory and 10x CPU within one running process. 
Microservices architecture tightly controls the amount of work in each request so that it is hyper-predictable in its compute resource requirements and, therefore, scalable by adding identical processes.\n\nCI compute, by nature, is the opposite of hyper-specialized. Across build, test, package, deploy, and so on, there are huge variations in the required machine resources of memory, CPU, network I/O, and high-speed disk access and, importantly, in included dependencies. The generalized compute nature also arises from varying inputs, so the same defined process might need far more resources depending on the nature of the raw input data - for example, varying input volume (many versus few data items) or varying input density (processing binary files versus text files). \n\nIt is the process that is being containerized that holds the attribute of generalized compute (bursty on at least some compute resources) or hyper-specialized (narrow definition of work to be done and therefore well-known compute resources per unit of completed work). Containerizing a process that exhibits generalized compute requirements is useful, but planning the resources of that container as if containerizing it had transformed the compute requirements into hyper-minimalized ones is the inflection point at which it becomes an antipattern, actually eroding the sought-after benefits we set out to create.\n\nIn the model I employ for leveraging containers in CI, loosening the hyper-specialization, immutability (no runtime installs), and very narrow compute resource principles of microservices simply reflects the real world, in that CI compute as a whole exhibits generalized, not hyper-specialized, compute characteristics.\n\n> Another realm where this seems true is desired state configuration management technologies - also known as “Configuration as Code”. It is super simple if there are pre-existing components or recipes for all that you need to do, but as soon as you have to build some for yourself, you enter a world of creating imperative code against a declarative API boundary (there's the \"embodiment\" curtain - the declarative API boundary). Generally, if you have not had to implement imperative code behind a declarative boundary before, this new world takes some significant experience to become proficient in.\n\n### Iterating SA: Experimental improvements for your next project\n\n1. In general, favor simplicity boundaries that reduce your work, especially in the realm of undifferentiated heavy lifting. In the realm of container-based CI, this includes having a rich coding language and a package manager to acquire additional complexity-embodying utilities quickly and easily.\n\n2. In general, be suspicious of an underlying antipattern if you have to spend an inordinate amount of time coding and maintaining workarounds in the service of simplicity. In the realm of container-based CI, this would be containers that are ultra-minimalized around microservices performance characteristics when they don’t hyper-scale as a standing service within CI.\n\n3. In general, stand back and examine the net complexity of the code and frameworks that will have to be maintained by yourself or your team, and check whether you’ve made tradeoffs that have a net negative tax on your efficiency. When complexity that could be managed by machines enters your workspace at high frequency, you have a massive antipattern of human efficiency.\n\n4. 
Frequently, when the heuristics being applied create negative human efficiency, they also create negative machine efficiency. Watch for this effect in your projects. The diagram in this post shows that over-minimalized containers can easily lead to using a lot more of them - all of which has machine overhead as well.\n\nIf the above resonates, CI pipeline engineers might want to consider loosening the \"microservices\" heuristics of hyper-specialization, ultra-minimalization, and immutability (no dynamic installs) for CI pipeline containers in order to ensure that the true net complexity level of the code they have to maintain is in balance and their productivity is preserved.\n\n### Appendix: Working examples of this idea\n\n- [AWS CLI Tools in Containers](https://gitlab.com/guided-explorations/aws/aws-cli-tools) has both Bash and PowerShell Core (on Linux OS) available so that one container set can suit the automation shell preference of both Linux and Windows heritage CI automation engineers.\n\n- This CI file [installs yq dynamically](https://gitlab.com/guided-explorations/gl-k8s-agent/gitops/envs/world-greetings-env-1/-/blob/main/.gitlab-ci.yml#L47-48) in the Bash container, but then [only installs the heavier jq and skopeo](https://gitlab.com/guided-explorations/gl-k8s-agent/gitops/envs/world-greetings-env-1/-/blob/main/.gitlab-ci.yml#L63) if the work at hand requires them, which demonstrates a way to be more efficient even when runtime installs are desired.\n\n- [Bash and PowerShell Script Code Libraries in Pure GitLab CI YAML](https://gitlab.com/guided-explorations/ci-cd-plugin-extensions/script-code-libraries-in-pure-gitlab-ci-yaml) shows how to have libraries of CI script code available to every container in a pipeline without encapsulating the libraries in a container themselves, and with minimal CI YAML complexity compared to YAML anchors, references, or extends. 
While the method is a little challenging to set up, from then on it pays off by decoupling scripting libraries from any other pipeline artifact.\n\n- [CI/CD Extension Freemarker File Templating](https://gitlab.com/guided-explorations/ci-cd-plugin-extensions/ci-cd-plugin-extension-freemarker-file-templating) shows that the install is very quick, affects only one job, and still version-pegs the installed utility.\n",[708,996,684,997,9],"CD","code review",{"slug":999,"featured":6,"template":688},"second-law-of-complexity-dynamics","content:en-us:blog:second-law-of-complexity-dynamics.yml","Second Law Of Complexity Dynamics","en-us/blog/second-law-of-complexity-dynamics.yml","en-us/blog/second-law-of-complexity-dynamics",{"_path":1005,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1006,"content":1012,"config":1017,"_id":1019,"_type":14,"title":1020,"_source":16,"_file":1021,"_stem":1022,"_extension":19},"/en-us/blog/secure-and-publish-python-packages-a-guide-to-ci-integration",{"title":1007,"description":1008,"ogTitle":1007,"ogDescription":1008,"noIndex":6,"ogImage":1009,"ogUrl":1010,"ogSiteName":673,"ogType":674,"canonicalUrls":1010,"schema":1011},"Secure and publish Python packages: A guide to CI integration","Learn how to implement a secure CI/CD pipeline across five stages with the GitLab DevSecOps platform.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749662080/Blog/Hero%20Images/AdobeStock_1097303277.jpg","https://about.gitlab.com/blog/secure-and-publish-python-packages-a-guide-to-ci-integration","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Secure and publish Python packages: A guide to CI integration\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Tim Rizzi\"}],\n        \"datePublished\": \"2025-01-21\",\n      }",{"title":1007,"description":1008,"authors":1013,"heroImage":1009,"date":1014,"body":1015,"category":895,"tags":1016},[678],"2025-01-21","Supply chain security is a critical concern in software development. Organizations need to verify the authenticity and integrity of their software packages. This guide will show you how to implement a secure CI/CD pipeline for Python packages using GitLab CI, incorporating package signing and attestation using Sigstore's Cosign.\n\nYou'll learn:\n\n- [Why sign and attest your Python packages?](#why-sign-and-attest-your-python-packages%3F)\n- [Pipeline overview](#pipeline-overview)\n- [Complete pipeline implementation: Setting up the environment](#complete-pipeline-implementation-setting-up-the-environment)\n   * [Environment configuration](#environment-configuration)\n   * [Configuration breakdown](#configuration-breakdown)\n- The 6 stages\n\n    1. [Building](#building-crafting-the-package)\n    2. [Signing](#signing-the-digital-notarization)\n    3. [Verification](#verification-the-security-checkpoint)\n    4. [Publishing](#publishing-the-controlled-release)\n    5. [Publishing signatures](#publishing-signatures-making-verification-possible)\n    6. 
[Consumer verification](#consumer-verification-testing-the-user-experience)\n\n## Why sign and attest your Python packages?\n\nHere are four reasons to sign and attest your Python packages:\n\n* **Supply chain security:** Package signing ensures that the code hasn't been tampered with between build and deployment, protecting against supply chain attacks.\n* **Compliance requirements:** Many organizations, especially in regulated industries, require cryptographic signatures and provenance information for all deployed software.\n* **Traceability:** Attestations provide a verifiable record of build conditions, including who built the package and under what circumstances.\n* **Trust verification:** Consumers of your package can cryptographically verify its authenticity before installation.\n\n## Pipeline overview\n\nEnsuring your code's integrity and authenticity is necessary. Imagine a pipeline that doesn't just compile your code but creates a cryptographically verifiable narrative of how, when, and by whom your package was created. Each stage acts as a guardian, checking and documenting the package's provenance.\n\nHere are six stages of a GitLab pipeline that ensure your package is secure and trustworthy:\n\n* Build: Creates a clean, standard package that can be easily shared and installed.\n* Signing: Adds a digital signature that proves the package hasn't been tampered with since it was created.\n* Verification: Double-checks that the signature is valid and the package meets all our security requirements.\n* Publishing: Uploads the verified package to GitLab's package registry, making it available for others to use.\n* Publishing Signatures: Makes signatures available for verification.\n* Consumer Verification: Simulates how end users can verify package authenticity.\n\n## Complete pipeline implementation: Setting up the environment\n\nBefore we build our package, we need to set up a consistent and secure build environment. This configuration ensures every package is created with the same tools, settings, and security checks.\n\n### Environment configuration\n\nOur pipeline requires specific tools and settings to work correctly.\n\nPrimary configurations:\n\n* Python 3.10 for consistent builds\n* Cosign 2.2.3 for package signing\n* GitLab package registry integration\n* Hardcoded package version for reproducibility\n\n**Note about versioning:** We've chosen to use a hardcoded version (`\"1.0.0\"`) in this example rather than deriving it from git tags or commits. This approach ensures complete reproducibility and makes the pipeline behavior more predictable. In a production environment, you might want to use semantic versioning based on git tags or another versioning strategy that fits your release process.\n\nTool requirements:\n\n* Basic utilities: `curl`, `wget`\n* Cosign for cryptographic signing\n* Python packaging tools: `build`, `twine`, `setuptools`, `wheel`\n\n
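The pipeline jobs below extend two shared base jobs, `.python-job` and `.python+cosign-job`, whose definitions are not shown in this post. A minimal sketch of what they might look like, assuming the tools above are installed in `before_script` (the exact install commands here are an assumption, not the post's code):\n\n```yaml\n# Hypothetical hidden base jobs referenced by `extends:` in the stages below.\n.python-job:\n  image: python:${PYTHON_VERSION}\n  before_script:\n    # Python packaging tools used by the build and publish stages\n    - pip install --cache-dir ${PIP_CACHE_DIR} build twine setuptools wheel\n\n.python+cosign-job:\n  extends: .python-job\n  before_script:\n    - !reference [.python-job, before_script]\n    # Fetch a pinned Cosign release for the signing and verification stages\n    - curl -sSLo /usr/local/bin/cosign https://github.com/sigstore/cosign/releases/download/v2.2.3/cosign-linux-amd64\n    - chmod +x /usr/local/bin/cosign\n```\n\n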
### Configuration breakdown\n\n```yaml\nvariables:\n  PYTHON_VERSION: '3.10'\n  PACKAGE_NAME: ${CI_PROJECT_NAME}\n  PACKAGE_VERSION: \"1.0.0\"\n  FULCIO_URL: 'https://fulcio.sigstore.dev'\n  REKOR_URL: 'https://rekor.sigstore.dev'\n  CERTIFICATE_IDENTITY: 'https://gitlab.com/${CI_PROJECT_PATH}//.gitlab-ci.yml@refs/heads/${CI_DEFAULT_BRANCH}'\n  CERTIFICATE_OIDC_ISSUER: 'https://gitlab.com'\n  PIP_CACHE_DIR: \"$CI_PROJECT_DIR/.pip-cache\"\n  COSIGN_YES: \"true\"\n  GENERIC_PACKAGE_BASE_URL: \"${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/packages/generic/${PACKAGE_NAME}/${PACKAGE_VERSION}\"\n```\n\nWe use caching to speed up subsequent builds:\n\n```yaml\ncache:\n  paths:\n    - ${PIP_CACHE_DIR}\n```\n\n## Building: Crafting the package\n\nEvery software journey begins with creation. In our pipeline, the build stage is where raw code transforms into a distributable package, ready to travel across different Python environments.\n\nThe build process creates two standardized formats:\n\n* a wheel package (.whl) for quick, efficient installation\n* a source distribution (.tar.gz) that carries the complete code\n\nHere's the build stage implementation:\n\n```yaml\nbuild:\n  extends: .python-job\n  stage: build\n  script:\n    - git init\n    - git config --global init.defaultBranch main\n    - git config --global user.email \"ci@example.com\"\n    - git config --global user.name \"CI\"\n    - git add .\n    - git commit -m \"Initial commit\"\n    - export NORMALIZED_NAME=$(echo \"${CI_PROJECT_NAME}\" | tr '-' '_')\n    - sed -i \"s/name = \\\".*\\\"/name = \\\"${NORMALIZED_NAME}\\\"/\" pyproject.toml\n    - sed -i \"s|\\\"Homepage\\\" = \\\".*\\\"|\\\"Homepage\\\" = \\\"https://gitlab.com/${CI_PROJECT_PATH}\\\"|\" pyproject.toml\n    - python -m build\n  artifacts:\n    paths:\n      - dist/\n      - pyproject.toml\n```\n\nLet's break down what this build stage does:\n\n1. Initializes a Git repository (`git init`) and configures it with basic settings\n2. Normalizes the package name by converting hyphens to underscores, which is required for Python packaging\n3. Updates the package metadata in `pyproject.toml` to match our project settings\n4. Builds both wheel and source distribution packages using `python -m build`\n5. Preserves the built packages and configuration as artifacts for subsequent stages\n\n## Signing: The digital notarization\n\nIf attestation is the package's biography, signing is its cryptographic seal of authenticity. This is where we transform our package from a mere collection of files into a verified, tamper-evident artifact.\n\nThe signing stage uses Cosign to apply a digital signature as an unbreakable seal. 
This isn't just a stamp — it's a complex cryptographic handshake that proves the package's integrity and origin.\n\n```yaml\nsign:\n  extends: .python+cosign-job\n  stage: sign\n  id_tokens:\n    SIGSTORE_ID_TOKEN:\n      aud: sigstore\n  script:\n    - |\n      for file in dist/*.whl dist/*.tar.gz; do\n        if [ -f \"$file\" ]; then\n          filename=$(basename \"$file\")\n          cosign sign-blob --yes \\\n            --fulcio-url=${FULCIO_URL} \\\n            --rekor-url=${REKOR_URL} \\\n            --oidc-issuer $CI_SERVER_URL \\\n            --identity-token $SIGSTORE_ID_TOKEN \\\n            --output-signature \"dist/${filename}.sig\" \\\n            --output-certificate \"dist/${filename}.crt\" \\\n            \"$file\"\n        fi\n      done\n  artifacts:\n    paths:\n      - dist/\n```\n\nThis signing stage performs several crucial operations:\n\n1. Obtains an OIDC token from GitLab for authentication with Sigstore services\n2. Processes each built package (both wheel and source distribution)\n3. Uses Cosign to create a cryptographic signature (`.sig`) for each package\n4. Generates a certificate (`.crt`) that proves the signature's authenticity\n5. Stores both signatures and certificates alongside the packages as artifacts\n\n## Verification: The security checkpoint\n\nVerification is our final quality control gate. It's not just a check — it's a security interrogation where every aspect of the package is scrutinized.\n\n```yaml\nverify:\n  extends: .python+cosign-job\n  stage: verify\n  script:\n    - |\n      failed=0\n      for file in dist/*.whl dist/*.tar.gz; do\n        if [ -f \"$file\" ]; then\n          filename=$(basename \"$file\")\n          if ! cosign verify-blob \\\n            --signature \"dist/${filename}.sig\" \\\n            --certificate \"dist/${filename}.crt\" \\\n            --certificate-identity \"${CERTIFICATE_IDENTITY}\" \\\n            --certificate-oidc-issuer \"${CERTIFICATE_OIDC_ISSUER}\" \\\n            \"$file\"; then\n            failed=1\n          fi\n        fi\n      done\n      if [ $failed -eq 1 ]; then\n        exit 1\n      fi\n```\n\nThe verification stage implements several security checks:\n\n1. Examines each package file in the `dist` directory\n2. Uses Cosign to verify the signature matches the package content\n3. Confirms the certificate's identity matches our expected GitLab pipeline identity\n4. Validates our trusted OIDC provider issued the certificate\n5. Fails the entire pipeline if any verification check fails, ensuring only verified packages proceed\n\n## Publishing: The controlled release\n\nPublishing is where we make our verified packages available through GitLab's package registry. It's a carefully choreographed release that ensures only verified, authenticated packages reach their destination.\n\n```yaml\npublish:\n  extends: .python-job\n  stage: publish\n  script:\n    - |\n      cat \u003C\u003C EOF > ~/.pypirc\n      [distutils]\n      index-servers = gitlab\n      [gitlab]\n      repository = ${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/packages/pypi\n      username = gitlab-ci-token\n      password = ${CI_JOB_TOKEN}\n      EOF\n      TWINE_PASSWORD=${CI_JOB_TOKEN} TWINE_USERNAME=gitlab-ci-token \\\n        twine upload --repository-url ${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/packages/pypi \\\n        dist/*.whl dist/*.tar.gz\n```\n\nThe publishing stage handles several important tasks:\n\n1. Creates a `.pypirc` configuration file with GitLab package registry credentials\n2. 
Uses the GitLab CI job token for secure authentication\n3. Uploads both wheel and source distribution packages to the GitLab PyPI registry\n4. Makes the packages available for installation via pip\n\n## Publishing signatures: Making verification possible\n\nAfter publishing the packages, we must make their signatures and certificates available for verification. We store these in GitLab's generic package registry, making them easily accessible to users who want to verify package authenticity.\n\n```yaml\npublish_signatures:\n  extends: .python+cosign-job\n  stage: publish_signatures\n  script:\n    - |\n      for file in dist/*.whl dist/*.tar.gz; do\n        if [ -f \"$file\" ]; then\n          filename=$(basename \"$file\")\n          curl --header \"JOB-TOKEN: ${CI_JOB_TOKEN}\" \\\n               --fail \\\n               --upload-file \"dist/${filename}.sig\" \\\n               \"${GENERIC_PACKAGE_BASE_URL}/${filename}.sig\"\n\n          curl --header \"JOB-TOKEN: ${CI_JOB_TOKEN}\" \\\n               --fail \\\n               --upload-file \"dist/${filename}.crt\" \\\n               \"${GENERIC_PACKAGE_BASE_URL}/${filename}.crt\"\n        fi\n      done\n```\n\nThe signature publishing stage performs these key operations:\n\n1. Processes each built package to find its corresponding signature files\n2. Uses the GitLab API to upload the signature (`.sig`) file to the generic package registry\n3. Uploads the corresponding certificate (`.crt`) file\n4. Makes these verification artifacts available for downstream package consumers\n5. Uses the same version and package name to maintain the connection between packages and signatures\n\n## Consumer verification: Testing the user experience\n\nThe final stage simulates how end users will verify your package's authenticity. This stage acts as a final check and a practical example of the verification process.\n\n```yaml\nconsumer_verification:\n  extends: .python+cosign-job\n  stage: consumer_verification\n  script:\n    - |\n      git init\n      git config --global init.defaultBranch main\n      mkdir -p pkg signatures\n\n      pip download --index-url \"https://gitlab-ci-token:${CI_JOB_TOKEN}@gitlab.com/api/v4/projects/${CI_PROJECT_ID}/packages/pypi/simple\" \\\n          \"${NORMALIZED_NAME}==${PACKAGE_VERSION}\" --no-deps -d ./pkg\n\n      pip download --no-binary :all: \\\n          --index-url \"https://gitlab-ci-token:${CI_JOB_TOKEN}@gitlab.com/api/v4/projects/${CI_PROJECT_ID}/packages/pypi/simple\" \\\n          \"${NORMALIZED_NAME}==${PACKAGE_VERSION}\" --no-deps -d ./pkg\n\n      failed=0\n      for file in pkg/*.whl pkg/*.tar.gz; do\n        if [ -f \"$file\" ]; then\n          filename=$(basename \"$file\")\n          sig_url=\"${GENERIC_PACKAGE_BASE_URL}/${filename}.sig\"\n          cert_url=\"${GENERIC_PACKAGE_BASE_URL}/${filename}.crt\"\n\n          curl --fail --silent --show-error \\\n               --header \"JOB-TOKEN: ${CI_JOB_TOKEN}\" \\\n               --output \"signatures/${filename}.sig\" \\\n               \"$sig_url\"\n\n          curl --fail --silent --show-error \\\n               --header \"JOB-TOKEN: ${CI_JOB_TOKEN}\" \\\n               --output \"signatures/${filename}.crt\" \\\n               \"$cert_url\"\n\n          if ! 
cosign verify-blob \\\n            --signature \"signatures/${filename}.sig\" \\\n            --certificate \"signatures/${filename}.crt\" \\\n            --certificate-identity \"${CERTIFICATE_IDENTITY}\" \\\n            --certificate-oidc-issuer \"${CERTIFICATE_OIDC_ISSUER}\" \\\n            \"$file\"; then\n            failed=1\n          fi\n        fi\n      done\n\n      if [ $failed -eq 1 ]; then\n        exit 1\n      fi\n```\n\nThis consumer verification stage simulates the end-user experience by:\n\n1. Creating a clean environment to test package installation\n2. Downloading the published packages from the GitLab PyPI registry\n3. Retrieving the corresponding signatures and certificates from the generic package registry\n4. Performing the same verification steps that end users would perform\n5. Ensuring the entire process works from a consumer's perspective\n6. Failing the pipeline if any verification step fails, providing an early warning of any issues\n\n## Summary\n\nThis comprehensive pipeline provides a secure and reliable way to build, sign, and publish Python packages to GitLab's package registry. By following these practices and implementing the suggested security measures, you can ensure your packages are appropriately verified and safely distributed to your users.\n\nThe pipeline combines modern security practices with efficient automation to create a robust software supply chain. Using Sigstore's Cosign for signing and attestation, along with GitLab's built-in security features, you can provide users with trustworthy cryptographically verified packages.\n\n> #### Get started on your security journey today with a [free 60-day trial of GitLab Ultimate](https://gitlab.com/-/trials/new?glm_content=default-saas-trial&glm_source=about.gitlab.com).\n\n## Learn more\n- [Documentation: Use Sigstore for keyless signing and verification](https://docs.gitlab.com/ee/ci/yaml/signing_examples.html)\n- [Streamline security with keyless signing and verification in GitLab](https://about.gitlab.com/blog/keyless-signing-with-cosign/)\n- [Annotate container images with build provenance using Cosign in GitLab CI/CD](https://about.gitlab.com/blog/annotate-container-images-with-build-provenance-using-cosign-in-gitlab-ci-cd/)",[895,231,282,729,708,109,482,684,9],{"slug":1018,"featured":91,"template":688},"secure-and-publish-python-packages-a-guide-to-ci-integration","content:en-us:blog:secure-and-publish-python-packages-a-guide-to-ci-integration.yml","Secure And Publish Python Packages A Guide To Ci Integration","en-us/blog/secure-and-publish-python-packages-a-guide-to-ci-integration.yml","en-us/blog/secure-and-publish-python-packages-a-guide-to-ci-integration",{"_path":1024,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1025,"content":1031,"config":1036,"_id":1038,"_type":14,"title":1039,"_source":16,"_file":1040,"_stem":1041,"_extension":19},"/en-us/blog/structuring-the-gitlab-package-registry-for-enterprise-scale",{"title":1026,"description":1027,"ogTitle":1026,"ogDescription":1027,"noIndex":6,"ogImage":1028,"ogUrl":1029,"ogSiteName":673,"ogType":674,"canonicalUrls":1029,"schema":1030},"Structuring the GitLab Package Registry for enterprise scale","Learn how to leverage GitLab's unique project-based publishing model alongside root-group-level consumption to create a secure, flexible package management 
strategy.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749662332/Blog/Hero%20Images/blog-image-template-1800x945__23_.png","https://about.gitlab.com/blog/structuring-the-gitlab-package-registry-for-enterprise-scale","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Structuring the GitLab Package Registry for enterprise scale\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Tim Rizzi\"}],\n        \"datePublished\": \"2025-02-19\",\n      }",{"title":1026,"description":1027,"authors":1032,"heroImage":1028,"date":1033,"body":1034,"category":685,"tags":1035},[678],"2025-02-19","As organizations grow, managing internal packages becomes increasingly complex. While traditional package managers, like JFrog Artifactory and Sonatype Nexus, use a centralized repository approach, GitLab takes a different path that aligns with modern development teams' work. In this post, we'll explore how to effectively structure your GitLab Package Registry for enterprise scale, focusing on Maven and npm packages as examples.\n\n## Understanding the GitLab Package Registry model\n\nIf you're coming from a traditional package manager, GitLab's approach might initially seem different. Instead of a single centralized repository, GitLab integrates package management directly into your existing project and group structure. This means:\n\n- Teams publish packages to specific projects where the code lives\n- Teams consume packages from root group registries that aggregate all packages below them\n- Access control inherits from your existing GitLab permissions\n\nThis model offers several advantages:\n\n- Clear ownership of packages alongside their source code\n- Granular access control without additional configuration\n- Simplified CI/CD integration\n- Natural alignment with team structures\n- Single URL for accessing all company packages through root group consumption\n\n### The power of root group package registry\n\nWhile GitLab supports package consumption at various group levels, using the root group level has emerged as a best practice among our users. 
Here's why:\n\n- **Single access point:** One URL provides access to all private packages across your organization\n- **Consistent package naming:** Group-level endpoints allow teams to maintain their preferred naming conventions without conflicts\n- **Simplified configuration:** All developers can use the same configuration to access packages\n- **Secure access management:** Combines with deploy tokens for easy rotation and access control\n- **Hierarchical organization**: Naturally maps to your organizational structure while maintaining unified access\n\n## Real-world example: Enterprise structure\n\nLet's look at how this works in practice with a large enterprise:\n\n```\ncompany/ (root group)\n├── retail-division/\n│   ├── shared-libraries/     # Division-specific shared code\n│   └── teams/\n│       ├── checkout/        # Team publishes packages here\n│       └── inventory/       # Team publishes packages here\n├── banking-division/\n│   ├── shared-libraries/    # Division-specific shared code\n│   └── teams/\n│       ├── payments/       # Team publishes packages here\n│       └── fraud/         # Team publishes packages here\n└── shared-platform/        # Enterprise-wide shared code\n    ├── java-commons/      # Shared Java libraries\n    └── ui-components/     # Shared UI components\n```\n\n### Publishing configuration\n\nTeams publish packages to their specific project registries, maintaining clear ownership:\n\n1. Maven example\n\n```xml\n\u003C!-- checkout/pom.xml -->\n\u003CdistributionManagement>\n    \u003Crepository>\n        \u003Cid>gitlab-maven\u003C/id>\n        \u003Curl>${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/packages/maven\u003C/url>\n    \u003C/repository>\n\u003C/distributionManagement>\n```\n\n2. npm example\n\n```json\n// ui-components/package.json\n{\n  \"name\": \"@company/ui-components\",\n  \"publishConfig\": {\n    \"registry\": \"${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/packages/npm/\"\n  }\n}\n```\n\n### Consuming configuration\n\nThe power of root group consumption comes into play here. All teams configure a single endpoint for package access:\n\n1. Maven example\n\n```xml\n\u003C!-- Any project's pom.xml -->\n\u003Crepositories>\n    \u003Crepository>\n        \u003Cid>gitlab-maven\u003C/id>\n        \u003Curl>https://gitlab.example.com/api/v4/groups/company/-/packages/maven\u003C/url>\n    \u003C/repository>\n\u003C/repositories>\n```\n\n2. npm example\n\n```\n# Any project's .npmrc\n@company:registry=https://gitlab.example.com/api/v4/groups/company/-/packages/npm/\n```\n\nThis configuration automatically provides access to all packages across your organization while maintaining the benefits of project-based publishing.\n\n## Authentication and access control\n\nGitLab's model simplifies authentication through deploy tokens and CI/CD integration.\n\n### For CI/CD pipelines\n\nGitLab automatically handles authentication in pipelines using `CI_JOB_TOKEN`:\n\n```yaml\n# .gitlab-ci.yml\npublish:\n  script:\n    - mvn deploy  # or npm publish\n  # CI_JOB_TOKEN provides automatic authentication\n```\n\n### For development\n\nUse group deploy tokens for package consumption:\n\n- Create read-only deploy tokens at the root group level\n- Rotate tokens periodically for security\n- Share a single configuration across all developers\n\n## Benefits of root group package registry\n\n1. Simplified configuration\n   - One URL for all package access\n   - Consistent setup across teams\n   - Easy token rotation\n2. 
Clear ownership\n   - Packages stay with their source code\n   - Teams maintain control over publishing\n   - Version history tied to project activity\n3. Natural organization\n   - Matches your company structure\n   - Supports team autonomy\n   - Enables cross-team collaboration\n\n## Getting started\n\n1. Set up your root group\n   - Create a clear group structure\n   - Configure appropriate access controls\n   - Create group deploy tokens\n2. Configure team projects\n   - Set up project-level publishing\n   - Implement CI/CD pipelines\n   - Document package naming conventions\n3. Standardize consumption\n   - Configure root group registry access\n   - Share deploy tokens securely\n   - Document package discovery process\n\n## Summary\n\nGitLab's package registry model, particularly when leveraging root group consumption, offers a powerful solution for enterprise package management. By combining project-based publishing with root group consumption, organizations get the best of both worlds: clear ownership and simplified access. This approach scales naturally with your organization while maintaining security and ease of use.\n\nStart by implementing this model with a single team or division, and expand as you see the benefits of this integrated approach. Remember that while this post focused on Maven and npm, the same principles apply to all package types supported by GitLab.\n\n> Get started with package registries today! Sign up for a [free, 60-day trial of GitLab Ultimate](https://about.gitlab.com/free-trial/).\n",[772,684,9],{"slug":1037,"featured":91,"template":688},"structuring-the-gitlab-package-registry-for-enterprise-scale","content:en-us:blog:structuring-the-gitlab-package-registry-for-enterprise-scale.yml","Structuring The Gitlab Package Registry For Enterprise Scale","en-us/blog/structuring-the-gitlab-package-registry-for-enterprise-scale.yml","en-us/blog/structuring-the-gitlab-package-registry-for-enterprise-scale",{"_path":1043,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1044,"content":1050,"config":1056,"_id":1058,"_type":14,"title":1059,"_source":16,"_file":1060,"_stem":1061,"_extension":19},"/en-us/blog/tutorial-how-to-set-up-your-first-gitlab-ci-cd-component",{"title":1045,"description":1046,"ogTitle":1045,"ogDescription":1046,"noIndex":6,"ogImage":1047,"ogUrl":1048,"ogSiteName":673,"ogType":674,"canonicalUrls":1048,"schema":1049},"Tutorial: How to set up your first GitLab CI/CD component","Use Python scripts in your GitLab CI/CD pipelines to improve usability. In this step-by-step guide, you'll learn how to get started building your own CI/CD component.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098410/Blog/Hero%20Images/Blog/Hero%20Images/blog-image-template-1800x945%20%2826%29_3lH4gZFVIGCndksN6Rlg85_1750098409928.png","https://about.gitlab.com/blog/tutorial-how-to-set-up-your-first-gitlab-ci-cd-component","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Tutorial: How to set up your first GitLab CI/CD component\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sophia Manicor\"},{\"@type\":\"Person\",\"name\":\"Noah Ing\"}],\n        \"datePublished\": \"2024-11-12\",\n      }",{"title":1045,"description":1046,"authors":1051,"heroImage":1047,"date":1053,"body":1054,"category":681,"tags":1055},[1052,747],"Sophia Manicor","2024-11-12","Do you use Python scripts in your GitLab CI pipelines? Do you want to create pipelines at scale? 
This tutorial shows how to set up your first [GitLab CI/CD component](https://docs.gitlab.com/ee/ci/components/) to deploy Python scripts.\n\nA [CI/CD component is a reusable single pipeline configuration unit](https://about.gitlab.com/blog/introducing-ci-components/). Use components to create a small part of a larger pipeline, or even to compose a complete pipeline configuration.\n\n## Prerequisites\n- Basic Python knowledge\n- Working knowledge of GitLab CI\n- 8 minutes\n\n## Python script\n\n* **[The demo Python script](https://gitlab.com/demos/templates/gitlab-python-cicd-component/-/blob/main/src/script.py?ref_type=heads)**\n\nThis Python script utilizes a library called [ArgParse](https://docs.python.org/3/library/argparse.html). ArgParse allows you to pass variables to the script through the command line. This script takes in three arguments:\n\n- [python_container_image](https://docs.gitlab.com/ee/ci/yaml/#image): The Python container image you wish to use.\n- [stage](https://docs.gitlab.com/ee/ci/yaml/#stage): The GitLab CI stage in which your job will run.\n- persons_name: Your name.\n\n```python\nimport argparse\n\nparser = argparse.ArgumentParser(description='Python CICD Component Boilerplate')\nparser.add_argument('python_container_image', type=str, help='python:3.10-slim')\nparser.add_argument('stage', type=str, help='Build')\nparser.add_argument('persons_name', type=str, help='Noah')\nargs = parser.parse_args()\n\npython_container_image = args.python_container_image\nstage = args.stage\npersons_name = args.persons_name\n```\n\nThis will take in these three variables and print out simple statements:\n\n```python\nprint(\"You have chosen \" + python_container_image + \" as the container image\")\nprint(\"You have chosen \" + stage + \" as the stage to run this job\")\nprint(\"Thank you \" + persons_name + \"! You are successfully using GitLab CI with a Python script.\")\n```\n\nTo test this script locally, you can call the script with the following command:\n\n```bash\npython3 src/script.py python_container_image stage name\n```\n\nModify this script accordingly if you’d like to add in your own arguments!\n\n## Template\n\n* **[Demo of template](https://gitlab.com/demos/templates/gitlab-python-cicd-component/-/blob/main/templates/template.yml?ref_type=heads)**\n\n**Note:** As long as the YAML file is placed in the `templates/` directory, the CI/CD component will know to pick it up. We named our template `template.yml`, but any name would work for this YAML file.\n\nNow, getting into the fun part of CI/CD components: inputs! [Inputs](https://docs.gitlab.com/ee/ci/yaml/inputs.html) allow you to pass variables into your pipeline.\n\n```yml\nspec:\n  inputs:\n    python_container_image:\n      default: python:3.10-slim\n      description: \"Define any python container image\"\n    stage:\n      default: build\n      description: \"Define the stage this job will run in\"\n    persons_name:\n      default: Noah\n      description: \"Put your name here\"\n```\nHere we have defined the three inputs that are our arguments in our Python script. You can see for each input we have added in a default value – this will be what the input is set to if not overridden. If we took out this default keyword, the input would become mandatory when we use our component. As it is written now, adding in these inputs when we use our component is optional due to our default values.
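\n\nFor example, a variant of the spec that drops the default (a minimal sketch, not part of the demo project) makes the input required:\n\n```yml\nspec:\n  inputs:\n    persons_name:\n      description: \"No default here, so every include must supply a value\"\n```\n\nWith this spec, including the component without a `persons_name` input fails validation at pipeline creation instead of silently running with a wrong value.\n\n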
We can also set descriptions to ensure that other developers can understand what to input when they use our component. Descriptions are optional, but they provide self-documentation within the code itself, which is always nice.\n\nAfter we set up our inputs, let’s write the rest of our component:\n\n```yml\ncomponent:\n  image: $[[ inputs.python_container_image ]]\n  stage: $[[ inputs.stage ]]\n  before_script:\n    - pip3 install -r src/requirements.txt\n  script: python3 src/script.py $[[ inputs.python_container_image ]] $[[ inputs.stage ]] $[[ inputs.persons_name ]]\n```\n\nTo use inputs in our component, we need to use the syntax `$[[ inputs.$VARIABLE ]]`. In the above code, you can see that we use inputs to define our image and stage with `$[[ inputs.python_container_image ]]` and `$[[ inputs.stage ]]`.\n\n```\nscript: python3 src/script.py $[[ inputs.python_container_image ]] $[[ inputs.stage ]] $[[ inputs.persons_name ]]\n```\nDiving into the script section, you can see we call our Python script. We are able to pass our inputs in with the help of ArgParse.\n\nNow that you have reviewed how the Python script works and the template has been set up, it is time to use the component!\n\n## Using the component\n\n* **[A demo of including the component](https://gitlab.com/demos/templates/gitlab-python-cicd-component/-/blob/main/.gitlab-ci.yml?ref_type=heads)**\n\nIn order to utilize the CI/CD component we just created, we need to include it in the `.gitlab-ci.yml` file that is in the root of our directory.\n\n```\ninclude:\n  # include the component located in the current project from the current SHA\n  - component: $CI_SERVER_FQDN/$CI_PROJECT_PATH/template@$CI_COMMIT_SHA\n    inputs:\n      python_container_image: python:3.11-slim\n      stage: test\n      persons_name: Tanuki\n```\n\nOne way to include it is to call upon it locally in the current project from the current commit SHA. You can find other ways to [reference a component in our documentation](https://docs.gitlab.com/ee/ci/components/#use-a-component).\n\nTo override the defaults, we have passed in other inputs so we get the correct image, stage, and name for our job.\n\nTry changing the `persons_name` to your own and watch the pipeline run!\n\n![ci/cd component tutorial - pipeline running](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098419/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750098418901.png)\n\nVoila! You have learned how to set up a basic CI/CD component utilizing a Python ArgParse script!
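\n\nOutside of a demo, you would more likely pin the component to a published release rather than to `$CI_COMMIT_SHA`. A hedged sketch (the group/project path and version tag below are made up for illustration):\n\n```yml\ninclude:\n  # consume a released version of the component from another project\n  - component: $CI_SERVER_FQDN/my-group/my-components/template@1.0.0\n    inputs:\n      persons_name: Tanuki\n```\n\nPinning to a tag keeps consuming pipelines stable while the component continues to evolve on its default branch.\n\n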
## What's next?\nIn the Python script, there is a commented-out GitLab Python library and OS library. If you would like to interact with the GitLab API, you can uncomment these and add a [GitLab personal access token](https://docs.gitlab.com/ee/user/profile/personal_access_tokens.html) as a [CI/CD variable](https://docs.gitlab.com/ee/ci/variables/) named `GLPAT`.\n\n```python\nimport gitlab\nimport os\n```\nAfterwards, you can interact with the GitLab API.\n\n```python\nglpat = os.environ['GLPAT']\n\ngl = gitlab.Gitlab(private_token=glpat)\n# SELF_HOSTED gl = gitlab.Gitlab(url='https://gitlab.example.com', private_token='xxxxxxxxxxxxxx')\ntry:\n   projects = gl.projects.list(get_all=True)\n   print(projects)\nexcept Exception as error:\n   print(\"Error:\", error)\n```\n\n> Learn more about CI/CD components and how to avoid building pipelines from scratch with the [GitLab CI/CD Catalog](https://about.gitlab.com/blog/ci-cd-catalog-goes-ga-no-more-building-pipelines-from-scratch/).\n\n## Read more\n\n- [FAQ: GitLab CI/CD Catalog](https://about.gitlab.com/blog/faq-gitlab-ci-cd-catalog/)\n- [Introducing CI/CD Steps, a programming language for DevSecOps automation](https://about.gitlab.com/blog/introducing-ci-cd-steps-a-programming-language-for-devsecops-automation/)\n- [A CI/CD component builder's journey](https://about.gitlab.com/blog/a-ci-component-builders-journey/)\n",[109,684,772,9],{"slug":1057,"featured":91,"template":688},"tutorial-how-to-set-up-your-first-gitlab-ci-cd-component","content:en-us:blog:tutorial-how-to-set-up-your-first-gitlab-ci-cd-component.yml","Tutorial How To Set Up Your First Gitlab Ci Cd Component","en-us/blog/tutorial-how-to-set-up-your-first-gitlab-ci-cd-component.yml","en-us/blog/tutorial-how-to-set-up-your-first-gitlab-ci-cd-component",{"_path":1063,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1064,"content":1070,"config":1078,"_id":1080,"_type":14,"title":1081,"_source":16,"_file":1082,"_stem":1083,"_extension":19},"/en-us/blog/ultimate-guide-to-migrating-from-aws-codecommit-to-gitlab",{"title":1065,"description":1066,"ogTitle":1065,"ogDescription":1066,"noIndex":6,"ogImage":1067,"ogUrl":1068,"ogSiteName":673,"ogType":674,"canonicalUrls":1068,"schema":1069},"Ultimate guide to migrating from AWS CodeCommit to GitLab","Learn how to migrate from AWS Services to GitLab and seamlessly integrate with the DevSecOps platform in this comprehensive tutorial.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097810/Blog/Hero%20Images/Blog/Hero%20Images/blog-image-template-1800x945%20%2828%29_4mi0l4wzUa5VI4wtf8gInx_1750097810027.png","https://about.gitlab.com/blog/ultimate-guide-to-migrating-from-aws-codecommit-to-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Ultimate guide to migrating from AWS CodeCommit to GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Tsukasa Komatsubara\"},{\"@type\":\"Person\",\"name\":\"Darwin Sanoy\"},{\"@type\":\"Person\",\"name\":\"Samer Akkoub\"},{\"@type\":\"Person\",\"name\":\"Bart Zhang\"}],\n        \"datePublished\": \"2024-08-26\",\n      }",{"title":1065,"description":1066,"authors":1071,"heroImage":1067,"date":1075,"body":1076,"category":685,"tags":1077},[1072,703,1073,1074],"Tsukasa Komatsubara","Samer Akkoub","Bart Zhang","2024-08-26","On July 25, 2024, AWS made a significant announcement regarding its CodeCommit service. 
As detailed in their [official blog post](https://aws.amazon.com/blogs/devops/how-to-migrate-your-aws-codecommit-repository-to-another-git-provider/), AWS has decided to close new customer access to CodeCommit. While existing customers can continue using the service, AWS will not introduce new features, focusing only on security, availability, and performance improvements.\n\nThis announcement has prompted development teams to consider migrating their repositories to alternative Git providers. In light of these changes, we've prepared this comprehensive guide to assist teams in migrating to GitLab and integrating with other AWS services.\n\n**Note:** For more details on AWS's official migration recommendations, please refer to [their blog post](https://aws.amazon.com/blogs/devops/how-to-migrate-your-aws-codecommit-repository-to-another-git-provider/).\n\n## About this guide\n\nThis guide provides comprehensive information for development teams using GitLab who are considering integration with AWS services or planning to migrate from AWS-hosted Git repositories to GitLab.com. The guide is structured into four main sections:\n\n- [Parallel migration to GitLab](#section-1-parallel-migration-to-gitlab): Explains how to gradually migrate from existing AWS-hosted repositories to GitLab.com while minimizing risks.\n\n- [Integration with AWS CodeBuild](#section-2-integrating-gitlab-with-aws-codebuild): Provides steps to integrate GitLab repositories with AWS CodeBuild, setting up a powerful continuous integration (CI) environment.\n\n- [Integration with AWS CodePipeline](#section-3-integrating-gitlab-with-aws-codepipeline): Details how to connect GitLab repositories with AWS CodePipeline to build efficient continuous delivery (CD) pipelines.\n\n- [Downstream integrations for CodePipeline and CodeStar Connections](#section-4-migrating-to-gitlab): Explains how to leverage GitLab-AWS connections for widespread service access, unlocking a cascade of integration possibilities across the AWS ecosystem.\n\nThrough this guide, you'll learn how to combine the powerful features of GitLab and AWS to create an efficient and flexible development workflow.\n\n## Section 1: Parallel migration to GitLab\n\nFor those considering migrating Git repositories hosted on AWS to GitLab.com, this section introduces a phased approach that minimizes migration risk. By leveraging GitLab's mirroring capabilities, you can maintain existing development flows while testing the new environment.\n\n### Why is parallel migration important?\n\nLarge-scale system migrations always involve risks, particularly potential impacts on ongoing development work, existing integrations, and automated processes. Adopting a parallel migration approach offers the following benefits:\n\n1. Risk minimization: Test the new environment while keeping existing systems operational.\n2. Seamless transition: Development teams can gradually acclimate to the new system.\n3. Integration testing: Thoroughly test all integrations and automation in the new environment.\n4. 
Future-proofing: Enable teams to gradually migrate to GitLab CI/CD in parallel to existing CI.\n\nParallel migration is not required if it is already known that you want to cut over directly to GitLab.\n\n### Steps for migrating to GitLab.com\n\n#### Step 1: Get set up on GitLab.com\n\n- Check if your company already has a group in use on GitLab.com and whether it has single sign-on (SSO) set up – if so, you will want to use both.\n\n- If your company does not have a presence on GitLab.com, visit [GitLab.com](https://gitlab.com) and create a new account or log in to an existing one.\n- Create a new company namespace (a group at the root level of gitlab.com).\n- Pick a name that reflects your entire company (and is not already taken).\n\n#### Step 2: Import repository\nFor parallel migration: Use GitLab's pull mirroring feature to automatically sync changes from AWS-hosted repositories to GitLab.com.\n\n1. Navigate to the target group on GitLab.com.\n2. In the upper right, click \"New project.\"\n3. On the \"Create new project\" page, click \"Import project.\"\n4. On the \"Import project\" page, click \"Repository by URL.\"\n5. Enter the URL of your AWS-hosted repository in the \"Git repository URL\" field.\n6. Underneath the Git repository URL field, check \"Mirror repository.\"\n7. Set up authentication: In the AWS CodeCommit console, select the clone URL for the repository you will migrate. If you plan on importing CodeCommit repositories into GitLab, you can use the HTTPS CodeCommit URL to clone the repository via GitLab Repository Mirroring. You will also need to provide your Git credentials from AWS for your identity and access management (IAM) user within GitLab. You can create Git credentials for AWS CodeCommit by following this [AWS guide](https://docs.aws.amazon.com/codecommit/latest/userguide/setting-up-gc.html).\n\n![Clone URL](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097822/Blog/Content%20Images/Blog/Content%20Images/clone-url-screenshot__1__aHR0cHM6_1750097822121.png)\n\nThis setup will automatically pull changes from the AWS-hosted repository to GitLab.com every five minutes by default.\n\nFor more information, read our [repository mirroring documentation](https://docs.gitlab.com/ee/user/project/repository/mirror/).\n\n#### Step 3: Test and validate integrations\n\n1. CI/CD pipelines: Set up the `.gitlab-ci.yml` file in GitLab CI to replicate existing pipelines. You can read more about [planning a migration from other CI tools into GitLab CI/CD](https://docs.gitlab.com/ee/ci/migration/plan_a_migration.html).\n2. Issue tracking: Import project issues and test workflows.\n3. Code review: Set up the merge request process and test review workflows.\n\n#### Step 4: Gradual migration\n\n1. Start with small or non-critical projects to familiarize yourself with working on GitLab.com.\n2. Provide training for team members and allow time to adapt to new workflows.\n3. Gradually migrate more projects while ensuring integrations and workflows are problem-free.\n\nFor more information, see [Automating Migrations from CodeCommit to GitLab](https://gitlab.com/guided-explorations/aws/migrating-from-codecommit-to-gitlab/-/blob/main/migrating_codecommit_to_gitlab.md).\n\n#### Step 5: Complete migration\nOnce all tests and validations are complete and the team is comfortable with the new environment, plan for full migration. For each project:\n\n1. Set a migration date and notify all stakeholders.\n2. Perform final data synchronization.\n3. 
Remove mirroring settings from the GitLab project.\n4. Set AWS-hosted repositories to read-only and transition all development work to GitLab.com.\n\n#### Step 6: Assess adoption of new capabilities\n\nGitLab collaboration and workflow automation for developers is far richer than CodeCommit's. It merits some time to learn what these capabilities are. The merge request process is especially rich compared to CodeCommit.\n\nAfter repositories are stable on GitLab, it is very easy to experiment with GitLab CI/CD in parallel to an existing solution. Teams can take time to perfect their GitLab CI/CD automation while production workflows remain unaffected.\n\nGitLab artifact management is also very capable with the Releases feature and many package registries.\n\n### Section 1: Summary\nBy adopting a parallel migration approach to GitLab, you can achieve a smooth transition while minimizing risks. This process allows teams to gradually adapt to the new environment and ensure all integrations and automations function correctly. If you already know that a parallel migration is unnecessary, a direct cutover follows the same import steps and simply omits the \"Mirror repository\" checkbox.\n\n## Section 2: Integrating GitLab with AWS CodeBuild\n\nFor those wanting to build and test code from GitLab repositories using AWS CodeBuild, this comprehensive guide will help you set up an efficient CI pipeline.\n\n### Prerequisites\n\n- GitLab.com account\n- AWS account\n- AWS CLI (configured)\n\n### Step 1: Create GitLab connection in AWS CodeStar Connections\n\n1. Log in to the AWS Management Console and navigate to the CodeBuild service.\n2. Select \"Settings\" > \"Connections\" from the left navigation panel.\n3. Click the \"Create connection\" button.\n4. Choose \"GitLab\" as the provider.\n5. Enter a connection name and click \"Connect to GitLab.\"\n6. You'll be redirected to the GitLab authentication page.\n7. Approve the necessary permissions.\n8. Once successful, the connection status will change to \"Available.\"\n\n![CodeStar Connect setup](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097822/Blog/Content%20Images/Blog/Content%20Images/codestar-connections-setup_aHR0cHM6_1750097822122.png)\n\n### Step 2: Create AWS CodeBuild project\n\n1. Click \"Create build project\" on the CodeBuild dashboard.\n2. Enter a project name and description.\n3. For source settings, select \"GitLab\" as the provider.\n4. Choose the connection you just created and specify the GitLab repository and branch.\n\n![Add CodeBuild project](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097822/Blog/Content%20Images/Blog/Content%20Images/codepipeline_step_3_add_codebuild_aHR0cHM6_1750097822123.png)\n\n**Note: From Step 3 forward, please configure the settings according to your specific environment and needs.**\n\n### Summary of Section 2\nThis section explained in detail how to integrate GitLab repositories with AWS CodeBuild. This setup enables a continuous integration pipeline where code changes in GitLab are automatically built and tested using AWS CodeBuild.\n\n## Section 3: Integrating GitLab with AWS CodePipeline\n\nFor those looking to implement continuous delivery from GitLab repositories using AWS CodePipeline, this detailed guide will be helpful. The integration has become even easier now that GitLab is available as an AWS CodeStar Connections provider.\n\n### Prerequisites\n\n- GitLab.com account\n- AWS account\n- AWS CLI (configured)\n\n### Step 1: Create GitLab connection in AWS CodeStar Connections\n\n1. 
Log in to the AWS Management Console and navigate to the CodePipeline service.\n2. Select \"Settings\" > \"Connections\" from the left navigation panel.\n3. Click the \"Create connection\" button.\n4. Choose \"GitLab\" as the provider.\n5. Enter a connection name and click \"Connect to GitLab.\"\n6. You'll be redirected to the GitLab authentication page.\n7. Approve the necessary permissions.\n8. Once successful, the connection status will change to \"Available.\"\n\n![CodeStar Connections setup](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097822/Blog/Content%20Images/Blog/Content%20Images/codestar-connections-setup_aHR0cHM6_1750097822125.png)\n\n### Step 2: Create AWS CodePipeline\n\n1. Click \"Create pipeline\" on the CodePipeline dashboard.\n2. Enter a pipeline name and click \"Next.\"\n3. Select \"GitLab\" as the source provider.\n4. Choose the connection you just created and specify the GitLab repository and branch.\n5. Select the Trigger type: You can trigger CodePipeline pipeline execution based on either pull or push events against specific branches and file types within your repository.\n\n![Add source provider](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097822/Blog/Content%20Images/Blog/Content%20Images/codepipeline_step_2_source_provider_aHR0cHM6_1750097822127.png)\n\n![Add source configuration](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097822/Blog/Content%20Images/Blog/Content%20Images/codepipeline_step_2_source_configured_aHR0cHM6_1750097822129.png)\n\n**Note: From Step 3 forward, please configure the settings according to your specific environment and needs.**\n\n### Summary of Section 3\nThis section detailed how to integrate GitLab repositories with AWS CodePipeline. This setup enables a continuous delivery pipeline where code changes in GitLab are automatically deployed to your AWS environment.\n\n## Section 4: Migrating to GitLab\n\nIntegrating GitLab with AWS unlocks powerful capabilities for streamlining your development and deployment workflows and helps to solve your source code management woes. This integration can be achieved in several ways, each offering unique benefits:\n\n- Using AWS CodeStar Connections to link GitLab with AWS services enables a more cohesive workflow by allowing external Git repositories, like GitLab, to connect with various AWS services. This setup supports automated builds, deployments, and other essential actions directly from your GitLab repository, making your development process more integrated and streamlined.\n\n- Connecting GitLab with AWS CodePipeline via AWS CodeStar Connections takes automation to the next level by allowing you to create a full CI/CD pipeline. This approach integrates GitLab with AWS CodePipeline, enabling you to automate the entire process – from source control and builds to testing and deployment – using AWS services like CodeBuild and CodeDeploy. This ensures a robust, scalable, and efficient delivery process.\n\n![Chart of new technology and solutions for using GitLab and AWS together](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097822/Blog/Content%20Images/Blog/Content%20Images/Announcing_New_Technology_and_Solutions_for_using_GitLab_and_AWS_Together_aHR0cHM6_1750097822130.png)\n\n1\\. Connecting GitLab with AWS services using AWS CodeStar Connections\n\nAWS CodeStar Connections is a service that allows you to connect external Git repositories (such as GitHub or Bitbucket) to AWS services. 
You can also connect GitLab to AWS services via CodeStar Connections. When using GitLab, you may need to set up a custom connection as an HTTP Git server.\nThe following AWS services can be connected to GitLab using this method:\n\n- **AWS Service Catalog**\n\nAWS Service Catalog helps organizations standardize and manage AWS resources. Integrating it with GitLab improves transparency in resource management and simplifies change tracking. Specifically, you can automate catalog updates based on GitLab commits, enhancing operational efficiency.\n\n- **AWS CodeBuild**\n\nAWS CodeBuild is a managed build service that compiles source code, runs tests, and produces deployable software packages. Integrating GitLab with CodeBuild allows automated build processes to start whenever code changes are pushed to GitLab. This ensures consistency in builds and facilitates easier collaboration and version control.\n\n- **AWS Glue Notebook Jobs**\n\nAWS Glue Notebook Jobs is a service that allows you to interactively develop and run data preparation and ETL (Extract, Transform, Load) tasks. Integrating GitLab with Glue Notebook Jobs enables version control for notebooks and ETL scripts, promotes collaboration among team members, and improves the quality management of data processing pipelines.\n\n- **AWS Proton**\n\nAWS Proton is a service that automates the development and deployment of microservices and serverless applications. By integrating GitLab with AWS Proton, you can manage infrastructure as code, automate deployments, and ensure consistent environment management, leading to more efficient development processes.\n\nAs AWS CodeStar Connections supports more services, connecting GitLab with additional AWS services will become easier. It's advisable to regularly check for new services that support CodeStar Connections.\n\n2\\. Connecting CodePipeline with GitLab via AWS CodeStar Connections (including CodeDeploy)\n\nAWS CodePipeline is a continuous delivery service that automates the release process for software. To connect GitLab with CodePipeline, you need to use AWS CodeStar Connections. This setup allows you to designate a GitLab repository as the source and automate the entire CI/CD pipeline.\nThe primary actions supported by CodePipeline include:\n- **Source control:** AWS CodeCommit, GitHub, Bitbucket, GitLab\n- **Build and test:** AWS CodeBuild, Jenkins\n- **Deploy:** AWS CodeDeploy, Elastic Beanstalk, ECS, S3\n- **Approval:** Manual approval\n- **Infrastructure management:** AWS CloudFormation\n- **Serverless:** AWS Lambda\n- **Testing:** AWS Device Farm\n- **Custom Actions:** AWS Step Functions\n\nBy integrating GitLab with CodePipeline, you can automatically trigger the pipeline whenever code changes are pushed to GitLab, allowing a consistent process from build to deployment. Additionally, combining this with GitLab's version control capabilities makes it easier to track deployment history and states, leading to more flexible and reliable software delivery.\n\n## What you've learned\nThis guide has provided comprehensive information on migrating to and integrating GitLab with AWS. 
Through the four main topics, we've covered:\n- Parallel migration to GitLab: How to gradually migrate from existing AWS-hosted repositories to GitLab.com while minimizing risks.\n- Integration with AWS CodeBuild: Steps to set up a powerful CI environment integrated with GitLab repositories.\n- Integration with AWS CodePipeline: How to build efficient continuous delivery pipelines using GitLab repositories.\n- Downstream integrations for CodePipeline and CodeStar Connections: Leveraging GitLab-AWS connections for widespread service access, unlocking a cascade of integration possibilities across the AWS ecosystem.\n\nAs every organization's code hosting and integration implementation strategy is unique, this tutorial may be used as a starting point for your own GitLab + AWS integration and implementation strategy.\n\n## Additional resources\n\nFor more detailed information and advanced configurations, refer to the following resources:\n\n- [GitLab documentation](https://docs.gitlab.com/)\n- [AWS CodeBuild User Guide](https://docs.aws.amazon.com/codebuild/latest/userguide/welcome.html)\n- [AWS CodePipeline User Guide](https://docs.aws.amazon.com/codepipeline/latest/userguide/welcome.html)\n- [GitLab CI/CD documentation](https://docs.gitlab.com/ee/ci/)\n- [Integrate with AWS](https://docs.gitlab.com/ee/solutions/cloud/aws/gitlab_aws_integration.html)\n\nIf you have questions or need support, please contact [GitLab Support](https://about.gitlab.com/support/) or AWS Support. We hope this comprehensive guide helps you in your AWS-GitLab integration journey.",[109,683,482,684,9,685,231],{"slug":1079,"featured":91,"template":688},"ultimate-guide-to-migrating-from-aws-codecommit-to-gitlab","content:en-us:blog:ultimate-guide-to-migrating-from-aws-codecommit-to-gitlab.yml","Ultimate Guide To Migrating From Aws Codecommit To Gitlab","en-us/blog/ultimate-guide-to-migrating-from-aws-codecommit-to-gitlab.yml","en-us/blog/ultimate-guide-to-migrating-from-aws-codecommit-to-gitlab",{"_path":1085,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1086,"content":1092,"config":1099,"_id":1101,"_type":14,"title":1102,"_source":16,"_file":1103,"_stem":1104,"_extension":19},"/en-us/blog/whiteboarding-remote-work-superpower",{"title":1087,"description":1088,"ogTitle":1087,"ogDescription":1088,"noIndex":6,"ogImage":1089,"ogUrl":1090,"ogSiteName":673,"ogType":674,"canonicalUrls":1090,"schema":1091},"Virtual whiteboarding is a remote work super power","Want to master a collective understanding of technical explanations remotely? Learn how to use virtual whiteboards to their maximum capabilities in this tutorial.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682431/Blog/Hero%20Images/kvalifik-5Q07sS54D0Q-unsplash.jpg","https://about.gitlab.com/blog/whiteboarding-remote-work-superpower","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Virtual whiteboarding is a remote work super power\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Darwin Sanoy\"}],\n        \"datePublished\": \"2022-09-01\",\n      }",{"title":1087,"description":1088,"authors":1093,"heroImage":1089,"date":1094,"body":1095,"category":681,"tags":1096},[703],"2022-09-01","\n\nAt one point in my career I had a solo business in technology training. During this time, I went through a transition from live, in-person classes to live-delivered virtual classes. 
One of the things that I dearly missed in virtual delivery was unpacking explanations through whiteboarding. The difference in the speed and completeness of achieving common understanding across the group was evident from the nature of the questions and discussions that occurred immediately afterward.\n\nAt that time, it was difficult to find solutions that enabled me to do this as fluidly as an in-person classroom experience. I persisted and came up with an elaborate solution involving software and hardware – but the results of a fluid whiteboarding experience for both presenter and participants were preserved.\n\n## Explaining and collaborating through drawings\n\n“[The Back of the Napkin: Solving Problems and Selling Ideas with Pictures](https://www.penguinrandomhouse.com/books/300247/the-back-of-the-napkin-expanded-edition-by-dan-roam/)” is a great book on the topic of leveraging drawing in business meetings. The title contains two main themes. “Selling Ideas” is about explaining something you already understand in a highly effective way that creates shared understanding. “Solving Problems” is a very different mode, that of collaborating to create a new visual model that documents the structure of a problem or envisions a new solution. While there are variations on these two themes, these appear to be the two most fundamental modes of using drawing in a group context.\n\n## The importance of progressive disclosure in understanding technical explanations\n\nTechnical explanation is challenging on its own - but the situation is made much worse by presenting complex visuals fully formed. Using a whiteboard for the same explanation leverages the progressive disclosure element of storytelling and overlays it on a technical visualization. This fundamentally enhances understanding because, when a complex visual appears completely formed, the visual cortex hijacks all neurological attention resources (including listening) as it attempts to make sense of the scene. Progressive disclosure allows the minds of listeners to focus on the verbal explanation because only the component being described is visible. As the narrator reveals the next chunk by drawing while explaining the relationship to the last chunk, the audience naturally shifts their full attention.\n\nYou could think of this effect as being the same as what a cartoon strip does to create a sense of progressive disclosure. By simply framing the scenes, our mind automatically focuses on one frame at a time in the intended sequence. The difference with technical explanations is that the final view is extremely informative without frames - it paints a global picture of the sum of the parts.\n\nThe magic of infographics also lies in enforcing progressive disclosure on their topic matter by purposely creating a visual so long that it must be scrolled. Frequently they are also organized around a natural or contrived timeline that the disclosure of the component parts progresses through. They frequently also create frames through visual effects such as lines, shapes, and whitespace.\n\nTechnical visualizations frequently have the characteristic that a big-picture visual is very valuable for complete understanding. Progressive disclosure resolves the contradictory requirements for both a “parts-level understanding” and a “big-picture understanding” of a technical design visualization. 
This is accomplished by layering up the big picture through many bite-sized explanations - exactly like how the mind's eye creates the world of a story when it is narrated in a sequence of small parts.\n\nWhiteboarding, by nature, can only be done as progressive disclosure and, in doing so, it transforms technical explanation into much more digestible and memorable frames for the audience to consume.\n\n## Maintaining a common understanding of the composite vision\n\nIn order to have the best chance to foster a \"group creative flow,\" everyone who is collaborating needs to maintain a common understanding of the composite vision as rapidly emerging ideas and insights are iteratively worked in. Whiteboarding fulfills this need in a way that is not distracting to the effort because a visualization of the vision is maintained in real time during collaboration.\n\nFrequently, group insights compound on each other as participants build on ideas expressed by others in the group. Whiteboarding provides a real-time composite visualization, which accelerates the emergence of new and valuable insights. Drawing frequently enables collaborators to express things they can't find the words for in the moment. If the conditions are right, there is the potential for a snowball effect of synergistic ideas being incorporated into the composite whole.\n\nThis is why the mechanism for holding the common understanding needs to be fluid and non-distracting.\n\n## Solutions architecture requires both technical explanation and collaboration\n\nIn my job as a Solutions Architect, it turns out that explaining technical visuals and collaborating on technical designs are critical to making helpful contributions to colleagues, customers, and partners.\n\nIt is truly amazing how much more quickly a group of people can get on the same page and innovate when whiteboarding is available.\n\nWhen there are language barriers, whiteboarding takes on even higher value: visuals are not dependent on language and can hold the real-time common understanding between speakers of different languages. Multiple times, I’ve found that speakers who are working through a translator get excited and are emboldened to start talking in English (my native tongue). Generally, the technical terms are recognized in any language, and adding them to the diagram fuels more mutual understanding.\n\nWhen I came to GitLab as a Solutions Architect, I once again began to experiment with ways to make fluid whiteboarding easy to do in any meeting.\n\n## Better than real whiteboards for in-person meetings\n\nOccasionally, when working hard for one objective, you accidentally achieve some objectives you didn’t even know you should or could have. This is known as serendipity.\n\nThis is what happened in my pursuit of very fluid virtual whiteboarding. 
I found that virtual whiteboarding solves a lot of the logistical and practical considerations of whiteboarding in in-person meetings, such as:\n\n* Verifying availability of whiteboards at a meeting venue\n* Simultaneous whiteboard and computer projection visibility (I’ve been in rooms where you had to stop projecting to use the whiteboard)\n* Not even needing a projector if you run an in-person virtual meeting to share your screen\n* Marker management - smell, mess, dried-out markers\n* The inability to preserve every whiteboard that you draw due to needing to erase for the next one\n* The inability to electronically store or share the visuals\n\n## The challenges of hardware and software selection\n\nI wish I could honestly say the process of putting together a fluid virtual whiteboard setup is now easy, but I have not found that to be the case.\n\n### Mental flow requires fluid technology\n\nWhether explaining or collaborating, mental flow is critical. If flow is interrupted by things that should be transparent, it is frustrating for everyone, and the audience quickly loses attention. It interrupts the thoughts of both the whiteboarder and the participants.\n\nThink of the times that someone starts whiteboarding and the one and only marker goes dry and they have to hunt for another. Virtual whiteboarding can actually make the problem of interrupting flow much worse. This is because if there are delays in the hardware or software, the shape of what you are drawing gets incorrectly “smoothed”.\n\nA lack of fluidity will generally make your shapes challenging to draw, so you slow down to allow the system to recognize your strokes and, well, it’s not fluid anymore - it’s distracting and effortful. And the rendering of shapes isn’t the worst of it: when trying to add text to label diagram parts, a lack of fluidity causes the smaller strokes of text to be unrecognizable. A lack of fluid drawing completely kills the presenter’s desire to use drawing and the audience’s desire to listen to the pictures.\n\n### Fluidity of the drawing activity\n\nIn trying to devise a cost-effective, yet fluid, setup I’ve tried all the shortcuts - such as using capacitive touchscreens with a stylus and using web apps as the primary whiteboarding software. Both of these are deal breakers for me because, after trying many variations of these two options, they never worked out to have sufficient fluidity.\n\nSo here are the constraints I ended up adopting to make drawing itself very fluid:\n\n* **Use an iOS or Android mobile platform tablet** - as it has the following advantages:\n    * Native mobile apps are much, much more fluid than web apps.\n    * There are many more native software options than there are for laptops.\n    * It is more modular and cheaper hardware in the long run than attempting to gain these capabilities in a laptop or desktop.\n* **Must support active pen technology** - capacitive touchscreens, even with a stylus, don’t cut it. When working rapidly, the smoothing algorithms aren’t very smooth - this makes drawing shapes difficult, but more importantly it makes writing words especially difficult.\n* **Having a stylus that is the correct length and thickness** is important for fluid writing and drawing. 
The stylus that comes with a tablet frequently does not have the conventional length or thickness of a real pen or pencil.\n\n### Fluidity of integrating the act of drawing into virtual meetings\n\nUsing drawing needs to be easy for the meeting host and for the participants.\n\n* For easy virtual sharing, it helps if the native app also has a collaborative web app that updates quickly, as this avoids the complications of joining the tablet to the meeting and sharing from there.\n* This also enables other meeting participants to whiteboard on the same virtual whiteboard without specialized hardware.\n* Some systems allow guests to join and whiteboard without having to set up an account.\n\n### Fluidity of availability of whiteboarding across teams\n\nMultiple elements determine whether fluid whiteboarding is available to everyone for collaboration. A primary one is cost, followed by the local availability of active pen tablet options in international markets. Thankfully, the applications covered later support both iOS and Android, which helps in finding affordable and locally available options.\n\n* Cost\n* Mobile apps are more likely to be available for both major mobile operating systems than a native desktop-only solution is to be available on multiple desktop platforms\n* Mobile platforms offer more flexibility and better global cost and availability than laptops with the same capability\n\n## A working example setup\n\n![](https://about.gitlab.com/images/blogimages/virtualwhiteboarding/whiteboarding-setup-samsung8.jpg)\n\n### Tablet\n\n* My first tablet was a [Samsung Galaxy Tab A 8.0 with S Pen (SM-P200)](https://www.amazon.com/gp/product/B07TS2N27S/) that cost me USD $235. It is actually an international edition, as the US market does not seem to offer an active pen tablet in the 8” format. In this case, the stylus fits inside the tablet, so it is really not appropriate for fluid drawing and writing.\n* Since my first purchase, Samsung has come out with a less expensive line of large tablets with active pen technology, so I now also have the [Samsung Galaxy Tab S6 Lite 10.4 inch (SM-P610NZBAXAR)](https://www.amazon.com/SAMSUNG-Android-Included-Speakers-SM-P610NZBAXAR/dp/B086Z3S3MY/), which I obtained for USD $250. While the stylus in this unit is considered more full-sized, the thickness, feel, forefinger button, and length all cause me to reach for my after-market stylus for the most fluid experience.\n\n### Stylus\n\n* For a stylus, I use the [STAEDTLER 180 Noris digital classic EMR Stylus](https://www.amazon.com/STAEDTLER-22-1-digital-compatibility-purchase/dp/B0728HBD7F). I find the length, weight, and lack of buttons all helpful in handling drawing and writing smoothly. The color makes it a little harder to lose. The downsides are that there are very few tablet cases that can accommodate it and that it looks so much like a pencil that someone in your household may accidentally toss it in the regular pen jar. Always be sure to verify stylus compatibility with your device's active pen technology.\n\n### Settings\n\nI almost returned the 10” tablet due to a behavior that a couple of settings fixed. Android tablets with no physical buttons put a navigation bar on the bottom part of the screen. I found that my palm kept being picked up by the Home Screen button, which would close the whiteboarding app. Also, when any of these buttons are activated by the stylus, it is equally disruptive to the flow. 
For the Navigation Bar settings, I set “Navigation type” to “Swipe gestures” and I enable “Block gestures with S Pen” (Android 12, Samsung OneUI v4.1).\n\n### Collaborative whiteboard options\n\nThere are multiple collaborative whiteboard options - some completely free, and some paid options that carry enough value-add features for heavy users to consider them.\n\n| Feature | [Liveboard Online](https://www.liveboard.online) | [Google Jamboard](https://support.google.com/jamboard/answer/7424836?hl=en) | [Miro](https://miro.com) |\n| --- | --- | --- | --- |\n| Long-term usage of free tier | Yes | Yes | No - only 3 boards |\n| Native mobile apps | Yes | Yes | Yes |\n| Native desktop apps | No | No | Yes |\n| Web app (for screen sharing) | Yes | Yes | Yes |\n| Fluidity extras | | | Multiple pens on deck |\n| Pages / canvas | Pages | Pages | Endless canvas |\n| Presentation mode | No | No | Yes |\n| Direct import from Google Slides | No | Yes | No |\n| Shape recognition from hand drawing | Light duty | Light duty | Awesome |\n| Reusability of drawings as formal technical diagrams | No | No | Yes |\n| Viewers synchronized to drawing location | Yes | Yes | Manual |\n\nSee the [GitLab remote work whiteboarding information](/company/culture/all-remote/collaboration-and-whiteboarding/) page for more.\n\nPhoto by [Kvalifik](https://unsplash.com/@kvalifik?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/s/photos/whiteboard?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n",[9,1097,1098],"collaboration","remote work",{"slug":1100,"featured":6,"template":688},"whiteboarding-remote-work-superpower","content:en-us:blog:whiteboarding-remote-work-superpower.yml","Whiteboarding Remote Work 
Superpower","en-us/blog/whiteboarding-remote-work-superpower.yml","en-us/blog/whiteboarding-remote-work-superpower",{"_path":1106,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1107,"content":1112,"config":1119,"_id":1121,"_type":14,"title":1122,"_source":16,"_file":1123,"_stem":1124,"_extension":19},"/en-us/blog/a-story-of-runner-scaling",{"title":1108,"description":803,"ogTitle":1108,"ogDescription":803,"noIndex":6,"ogImage":1109,"ogUrl":1110,"ogSiteName":673,"ogType":674,"canonicalUrls":1110,"schema":1111},"An SA story about hyperscaling GitLab Runner workloads using Kubernetes","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749669897/Blog/Hero%20Images/kaleidico-26MJGnCM0Wc-unsplash.jpg","https://about.gitlab.com/blog/a-story-of-runner-scaling","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"An SA story about hyperscaling GitLab Runner workloads using Kubernetes\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Darwin Sanoy\"},{\"@type\":\"Person\",\"name\":\"Brian Wald\"}],\n        \"datePublished\": \"2022-06-29\",\n      }",{"title":1108,"description":803,"authors":1113,"heroImage":1109,"date":1115,"body":1116,"category":681,"tags":1117},[703,1114],"Brian Wald","2022-06-29","\n\nThe following *fictional story*\u003Csup>1\u003C/sup> reflects a repeating pattern that Solutions Architects at GitLab encounter frequently. In the analysis of this story we intend to demonstrate three things: (a) Why one should be thoughtful in leveraging Kubernetes for scaling, (b) How unintended consequences of an approach to automation can create a net productivity loss for an organization (reversal of ROI) and (c) How solutions architecture perspectives can help find anti-patterns - retrospectively or when applied during a development process.\n\n### A DevOps transformation story snippet\n\nGild Investment Trust went through a DevOps transformational effort to build efficiency in their development process through automation with GitLab. Dakota, the application development director, knew that their current system handled about 80 pipelines with 600 total tasks and over 30,000 CI minutes so they knew that scaled CI was needed. Since development occurred primarily during European business hours, they were interested in reducing compute costs outside of peak work hours. Cloud compute was also a target due to acquring the pay per use model combined with elastic scaling.\n\nIngrid was the infrastructure engineer for developer productivity who was tasked with building out the shared GitLab Runner fleet to meet the needs of the development teams. At the beginning of the project she made a successful bid to leverage Kubernetes to scale CI and CD to take advantage of the elastic scaling and high availability all with the efficiency of containers. Ingrid had recently achieved the Certified Kubernetes Administrator (CKA) certification and she was eager to put her knowledge to practical use. She did some additional reading around applications running on Kubernetes and noted the strong emphasis on minimizing the resource profile of microservices to achieve efficiency in the form of compute density. She defined runner containers with 2GB of memory and 750millicores (about three quarters of a CPU) had good results from running some test CI pipelines. 
\n\nAbout 3 months into the proof of concept implementation, Sasha, a developer team lead, noted that many of their new job types were failing with strange error messages. The same jobs ran fine on quickly provisioned GitLab shell runners. Since the primary difference between the environments was the liberal allocation of machine resources in a shell runner, Sasha reasoned that the failures were likely due to the constrained CPU and memory resources of the Kubernetes pods.\n\nTo test this hypothesis, Ingrid decided to add a new pod definition. She found it was difficult to discern which of the job types were failing due to CPU constraints, which due to memory constraints, and which due to the combination of both. She knew it could take a lot of her time to discern the answer. She decided to simply define a pod that was more liberal on both CPU and memory and have it be selectable by runner tagging when more resources were needed for certain CI jobs. She created a GitLab Runner pod definition with 4GB of memory and 1750 millicores of CPU to cover the failing job types. Developers could then use these larger containers when the smaller ones failed by adding the ‘large-container’ tag to their GitLab job.\n\nSasha redid the CI testing and was delighted to find that the new resourcing made all the troubling jobs work fine. Sasha created a guide to help developers discern when mysterious error messages and failed CI jobs were probably the fault of resourcing, and then how to add a runner tag to the job to expand the resources.\n\nSome weeks later, two of the key jobs that were fixed by the new container resourcing started intermittently failing on NPM package creation jobs for just 3 pipelines on 2 different teams. Sasha, of course, tried to understand what the differences were and found that these particular pipelines were packaging notably large file sets: they were actually packaging testing data, because the NPM format was a convenient way to provide testing data during automated QA testing.\n\nSasha brought this information to Ingrid, and together they did testing to figure out that a 6GB container with 2500 millicores would be sufficient for creating an NPM package out of the current test dataset size. They also discussed whether the development team might want to use a dedicated test data management solution, but it turned out that the team's needs were very simple and that their familiarity with NPM packaging meant that bending NPM packaging to suit their purpose was actually more efficient than acquiring, deploying, learning, and maintaining a special system for this purpose. So a new pod resourcing profile was defined and could be accessed with the runner tag ‘xlarge’.\n\nSasha updated the guide for finding the optimal container size through failure testing of CI jobs - but they were not happy with how large the document was getting, nor with how imprecise the process was for determining when a CI job failure was most likely due to container resource constraints. They were concerned that developers would not go through the process and would instead simply pick the largest container resourcing profile to avoid the effort of optimizing, and they shared this concern with Ingrid. In fact, Sasha noted, they were hard pressed themselves to follow their own guidelines and not simply choose the largest container for all jobs.
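\n\nTo illustrate the selection mechanism the guide described, a job opting into a larger profile might look like the following sketch (the job name and script are hypothetical; the tag is the one from the story, and `tags` is standard GitLab CI syntax for routing jobs to matching runners):\n\n```yaml\n# .gitlab-ci.yml sketch: routing one job to a larger runner pod profile\npackage-test-data:            # hypothetical job name\n  tags:\n    - xlarge                  # selects the 6GB / 2500-millicore runner pods\n  script:\n    - npm pack                # packages the large QA test dataset\n```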
\n\nThe potential for this cycle to repeat was halted several months later when Dakota, the app dev director, generated a report that showed a 2% increase in developer time spent on failure testing to optimize CI job container sizes. Dakota considered this work to be a net new increase because, when the company was not using container-based CI, the developers did not have to manage this concern at all. Across a team of roughly 300 developers this amounted to around $840,000 per year in total compensation costs\u003Csup>2\u003C/sup>. It was also thought to add about 2 hours (and growing) to developer onboarding training. It was noted that the report did not attempt to account for the opportunity cost tax - what would these people be doing to solve customer problems with that time? It also did not account for the \"critical moments tax\" (when complexity has an outsized frustration effect and business impact in high-pressure, high-risk situations).\n\n### Solution architecture retrospective: What went wrong?\n\nThis story reflects a classic antipattern we see at GitLab, not only with regard to Kubernetes runner optimization, but also across other areas, such as overly minimalized build containers and the resulting pipeline complexity discussed in a previous blog post, [When the pursuit of simplicity creates complexity in container-based CI pipelines](/blog/second-law-of-complexity-dynamics/). Frequently this result comes from inadvertent adherence to heuristics for a small part of the problem as though they were applicable to the entirety of the problem (a type of logical “fallacy of composition”).\n\nThankfully, the emergence of the anti-pattern follows a pattern itself :). Let’s apply a little retrospective solution architecture to the \"what happened\" in order to learn what might be done proactively to create better iterations on the next automation project.\n\nThere is a certain approach to landscaping shared greenspaces where, rather than shaming people into compliance with signs about not cutting across the grass in key locations, the paths that humans naturally take are interpreted as the signal “there should be a path here.” Humans love beauty and detail in the environments they move through, but depending on the space, they can also value the efficiency of the shortest possible route slightly higher than aesthetics. A wise approach to landscaping holds these factors in a balance that reflects the efficiency-versus-aesthetics balance of the space user. The space stays beautiful without any shaming required.\n\nIn our story, Sasha and Ingrid had exactly this kind of cue about where the developers were likely to walk across the grass. If that cue is taken to be a signal that reflects efficiency, we can quickly see what can be done to avoid the antipattern when it starts to occur.\n\nThe signal was the observation that developers might simply choose the largest container all the time to avoid the fussy process of optimizing the compute resources being consumed. Some would consider that laziness and not a good signal to heed. However, most human laziness is deeply rooted in efficiency trade-offs. 
The developers intuitively understand that their time fussing with failure testing to optimize job containers, and their time diagnosing intermittent failures due to the varying content of those jobs, is not worth the amount of compute saved. That is especially true given the opportunity cost of not spending that time innovating on the core software solution for the revenue-generating application.\n\nIngrid and Sasha’s collaboration had initially missed the scaled human toil factor that was introduced to keep container resources at the minimum tolerable levels. They failed to factor the escalating cost of scaled human toil into a comprehensive efficiency measurement. They were following a microservices resourcing pattern, which assumes the compute is purpose-designed around minimal and well-known workloads. When taken as a whole in a shared CI cluster, CI compute follows generalized compute patterns where the needs for CPU, memory, disk IO, and network IO can vary wildly from one moment to the next.\n\nIn the broadest analysis, the infrastructure team over-indexed on the “team local” optimization of compute efficiency and unintentionally created a global de-optimization of scaled human toil for another team.\n\n## How can this antipattern be avoided?\n\nOne way to combat over-indexing on a single criterion is to have balancing objectives. This need is covered in \"Measure What Matters\" with the concept of counterbalancing objectives. There are some counterbalancing questions that can be asked of almost any automation effort. When solution architecture is functioning well, these counterbalancing questions are asked during the iterative process of building out a solution. Here are some applicable ones for this effort:\n\n**Appropriate rules: Does the primary compute optimization heuristic match the characteristics of the actual compute workload being optimized?**\n\nThe main benefits of container compute for CI are dependency isolation, dependency encapsulation, and a clean build environment for every job. None of these benefits has to do with the extreme resource optimizations available when engineering microservices-architected applications. As a whole, CI compute reflects generalized compute, not the ultra-specialized compute of a 12-factor-architected microservice.\n\n**Appropriate granularity: Does optimization need to be applied at every level?**\n\nThe fact that the cluster itself has elastic scaling at the Kubernetes node level is a higher-order optimization that will generate significant savings. Another possible optimization that would not require continuous fussing by developers is having a node group running on spot compute, as long as the spot compute runners self-identify their compute as spot so pipeline engineers can select appropriate jobs for it (a job-level sketch follows below). These optimizations can create huge savings, without creating scaled human toil.
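\n\nAs a hedged sketch of that spot-selection idea (the `spot` tag is an assumed naming convention, not a GitLab built-in; `retry` with `runner_system_failure` is standard GitLab CI syntax), a termination-tolerant job might opt in once like this:\n\n```yaml\n# .gitlab-ci.yml sketch: a retry-tolerant job routed to spot-backed runners\nunit-tests:                      # hypothetical job name\n  tags:\n    - spot                       # assumes spot-backed runners advertise this tag\n  retry:\n    max: 2\n    when: runner_system_failure  # re-run if the spot node is reclaimed mid-job\n  script:\n    - npm test\n```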
\n\n**People and processes counter check: Does the approach to optimization create scaled human toil by its intensity and/or frequency and/or lack of predictability for any people anywhere in the organization?**\n\nAutomation is all about moving human toil into the world of machines. While optimizing machine resources must always be a primary consideration, it is a lower priority than avoiding a net increase in human toil anywhere in your company. Machines can efficiently and elastically scale, while human workforces respond to scaling needs in months or even years.\n\n### Avoid scaled human toil\n\nNotice that neither the story nor the qualifying questions imply there is never a valid reason to have specialized runners that developers might need to select using tags. If a given attribute of runners could be selected once and with confidence, then the antipattern would not be in play. One example would be selecting spot-compute-backed runners for workloads that can tolerate termination. It is the potential for repeatedly needing attention to calibrate container sizing - made worse by the possibility of intermittent failure based on job content - that pushes this specific scenario into the realm of “scaled human toil.” The ability to leverage elastic cluster autoscaling is also a huge help in managing compute resources more efficiently.\n\nIf the risk of scaled human toil could be removed, then some of this approach might be preserved - for example, a very large minimum pod size plus a single super-size profile for jobs that break the standard size, selected just once. Caution is still warranted, because it is possible that developers would have to fuss a lot to get a two-pod approach working in practice.\n\n### Beware of scaled human toil of an individual\n\nOne thing the story did not highlight is that even if we were able to move all the fussing of such a design to the Infrastructure Engineer persona (perhaps by building an AI tuning mechanism that guesses at pod resourcing for a given job), the cumulative taxes on that role are frequently still not worth the expense. This is, in part, because it is a leveraged role - they help with all the automation of the scaled developer workforce, and any time they spend on one activity can’t be spent on another. We humans are generally bad at accounting for opportunity costs - what else could that specific engineer be innovating on to make a stronger overall impact on the organization’s productivity or bottom line? Given the very tight IT labor market, a given function may not be able to add headcount, so opportunity costs take on outsized importance.\n\n### Unlike people’s time, cloud compute does not carry opportunity cost\n\nA long time ago, people had to schedule time on shared computing resources. If that time was used for low-value compute activities, it could take time away from higher-value activities. In this model, compute time has an opportunity cost - the cost of what that time could have been used for if it wasn’t spent on a lower-value activity. Cloud compute has changed this because when compute is not being used, it is not being paid for. Additionally, elastic scaling eliminates the cost of over-provisioning hardware and completely removes the administrative overhead of procuring capacity - if you need a lot for a short period of time, it is immediately available. In contrast, people’s time is neither elastically scalable nor pay-per-use. This means that the opportunity cost question “What could this time be used for if it didn’t have to be spent on low-value activities?” is still relevant for anything that creates activities for people.\n\n### The first corollary to the Second Law of Complexity Dynamics\n\nThe Second Law of Complexity Dynamics was introduced in an earlier blog post. 
The essence is that complexity is never destroyed - it is only reformed - and primarily it is moved across a boundary line that dictates whether the management of the complexity is in our domain or externalized. For instance, if you write a function for md5 hashing in your code, you are managing the complexity of that code. If you install a dependency package that contains a premade md5 hash function that you simply use, then the complexity is externalized and managed for you by someone else.\n\nIn this story we are introducing a corollary to that “Law”: **Exchanging Raw Machine Resources for Complexity Management is Generally a Reasonable Trade-off.** In this case our scaled human toil is created by the complexity of unending, daily management of compute efficiency optimization. This does not mean that burning thousands of dollars of inefficient compute is OK because it saved someone 20 minutes of fussing. It is scoped in the following way:\n\n- Scoped to “complexity management” (which is what creates the “scaled human toil” in our story) - many minutes of toil that increase proportionally or compound with more of the activity.\n- Scoped to “raw machine resources” - meaning that there are no additional logistics nor human toil required to gain the resources. In the cloud, raw machine resources are generally available via configuration tweaks.\n- Scoped to “generally reasonable” - this indicates a disposition of being very cautious about increasing human toil with an automation solution, but it still makes sense to use models or calculations to check whether the rule actually holds in a given case.\n\nSo if we can externalize complexity management, that is great (the Second Law of Complexity Dynamics). If we can trade complexity management for raw computing resources, that is likely still better than managing it ourselves (the First Corollary).\n\n### Iterating SA: Experimental improvements for your next project\n\nThis post contains specifics that can be used to avoid antipatterns in building out a Kubernetes cluster for GitLab CI. However, in the qualifying questions we’ve attempted to kick it up one meta-level to help assess whether any automation effort has an “overly local” optimization focus that can inadvertently create a net loss of efficiency across the more global “company context.” It is our opinion that automation efforts that create a net loss in human productivity should not be classified as automation at all. While it’s strong medicine to apply to one’s work, we feel that doing so creates appropriate innovation pressure to ensure that individual automation efforts truly deliver on their inherent promise of higher human productivity and efficiency. So simply ask: “Does this way of solving a problem cause recurring work for anyone?”\n\n### DevOps transformation and solution architecture perspectives\n\nA technology architecture focus rightfully hones in on the technology choices for a solution build. However, if it is the only lens, it can result in scenarios like our story. Solutions architecture steps back to a broader perspective to sanity-check that solution iterations account for a more complete picture of both the positive and negative impacts across all three of people, processes, and technology. As an organizational competency, DevOps emphasizes solution architecture perspectives when it is defined as a collaborative and cultural approach to people, processes, and technology.\n\nFootnotes:\n\n1. 
This fictional story was devised specifically for this article and does not knowingly reflect the details of any other published story or an actual situation. The names used in the story are from [GitLab’s list of personas](https://about.gitlab.com/handbook/product/personas/).\n2. Across a team of 300 full-time developers: 2% of an 8-hour workday is 9.6 minutes. 9.6 min/workday x 250 workdays/year = 2,400 minutes = 40 hours = 5 workdays per developer per year. At $560 per day ($140K total comp / 250 days), that is $2,800 per developer per year, or $840,000/yr across 300 developers.\n\nCover image by [Kaleidico](https://unsplash.com/@kaleidico?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/)\n",[708,996,1118,9],"performance",{"slug":1120,"featured":6,"template":688},"a-story-of-runner-scaling","content:en-us:blog:a-story-of-runner-scaling.yml","A Story Of Runner Scaling","en-us/blog/a-story-of-runner-scaling.yml","en-us/blog/a-story-of-runner-scaling",3,[666,693,715,736,758,779,799,819,840],1754336032529]