[{"data":1,"prerenderedAt":856},["ShallowReactive",2],{"/en-us/the-source/":3,"footer-en-us":36,"the-source-navigation-en-us":343,"the-source-newsletter-en-us":370,"featured-article-en-us":382,"ai-categoryen-us":423,"security-categoryen-us":447,"platform-categoryen-us":468,"featured-authors-en-us":489,"category-authors-en-us":521,"hero-most-recent-articles-en-us":522,"platform-most-recent-articles-en-us":630,"the-source-resources-en-us":673,"ai-most-recent-articles-en-us":716,"security-most-recent-articles-en-us":758,"categories-en-us":854},{"_path":4,"_dir":5,"_draft":6,"_partial":6,"_locale":7,"config":8,"seo":10,"content":13,"_id":30,"_type":31,"title":7,"_source":32,"_file":33,"_stem":34,"_extension":35},"/en-us/the-source","en-us",false,"",{"layout":9},"the-source",{"title":11,"description":12},"The Source: Insights for the future of software development","Your decision-making partner for transformative strategies and expert technology advice.",[14,16,21,26],{"componentName":15},"TheSourceLandingHero",{"componentName":17,"componentContent":18},"TheSourceLandingCategory",{"config":19},{"category":20},"ai",{"componentName":17,"componentContent":22},{"config":23},{"category":24,"theme":25},"security","surface",{"componentName":17,"componentContent":27},{"config":28},{"category":29},"platform","content:en-us:the-source:index.yml","yaml","content","en-us/the-source/index.yml","en-us/the-source/index","yml",{"_path":37,"_dir":5,"_draft":6,"_partial":6,"_locale":7,"data":38,"_id":339,"_type":31,"title":340,"_source":32,"_file":341,"_stem":342,"_extension":35},"/shared/en-us/main-footer",{"text":39,"source":40,"edit":46,"contribute":51,"config":56,"items":61,"minimal":331},"Git is a trademark of Software Freedom Conservancy and our use of 'GitLab' is under license",{"text":41,"config":42},"View page source",{"href":43,"dataGaName":44,"dataGaLocation":45},"https://gitlab.com/gitlab-com/marketing/digital-experience/about-gitlab-com/","page 
source","footer",{"text":47,"config":48},"Edit this page",{"href":49,"dataGaName":50,"dataGaLocation":45},"https://gitlab.com/gitlab-com/marketing/digital-experience/about-gitlab-com/-/blob/main/content/","web ide",{"text":52,"config":53},"Please contribute",{"href":54,"dataGaName":55,"dataGaLocation":45},"https://gitlab.com/gitlab-com/marketing/digital-experience/about-gitlab-com/-/blob/main/CONTRIBUTING.md/","please contribute",{"twitter":57,"facebook":58,"youtube":59,"linkedin":60},"https://twitter.com/gitlab","https://www.facebook.com/gitlab","https://www.youtube.com/channel/UCnMGQ8QHMAnVIsI3xJrihhg","https://www.linkedin.com/company/gitlab-com",[62,89,162,230,292],{"title":63,"links":64,"subMenu":70},"Platform",[65],{"text":66,"config":67},"DevSecOps platform",{"href":68,"dataGaName":69,"dataGaLocation":45},"/platform/","devsecops platform",[71],{"title":72,"links":73},"Pricing",[74,79,84],{"text":75,"config":76},"View plans",{"href":77,"dataGaName":78,"dataGaLocation":45},"/pricing/","view plans",{"text":80,"config":81},"Why Premium?",{"href":82,"dataGaName":83,"dataGaLocation":45},"/pricing/premium/","why premium",{"text":85,"config":86},"Why Ultimate?",{"href":87,"dataGaName":88,"dataGaLocation":45},"/pricing/ultimate/","why ultimate",{"title":90,"links":91},"Solutions",[92,97,102,107,112,117,122,127,132,137,142,147,152,157],{"text":93,"config":94},"Digital transformation",{"href":95,"dataGaName":96,"dataGaLocation":45},"/topics/digital-transformation/","digital transformation",{"text":98,"config":99},"Security & Compliance",{"href":100,"dataGaName":101,"dataGaLocation":45},"/solutions/security-compliance/","security & compliance",{"text":103,"config":104},"Automated software delivery",{"href":105,"dataGaName":106,"dataGaLocation":45},"/solutions/delivery-automation/","automated software delivery",{"text":108,"config":109},"Agile development",{"href":110,"dataGaName":111,"dataGaLocation":45},"/solutions/agile-delivery/","agile 
delivery",{"text":113,"config":114},"Cloud transformation",{"href":115,"dataGaName":116,"dataGaLocation":45},"/topics/cloud-native/","cloud transformation",{"text":118,"config":119},"SCM",{"href":120,"dataGaName":121,"dataGaLocation":45},"/solutions/source-code-management/","source code management",{"text":123,"config":124},"CI/CD",{"href":125,"dataGaName":126,"dataGaLocation":45},"/solutions/continuous-integration/","continuous integration & delivery",{"text":128,"config":129},"Value stream management",{"href":130,"dataGaName":131,"dataGaLocation":45},"/solutions/value-stream-management/","value stream management",{"text":133,"config":134},"GitOps",{"href":135,"dataGaName":136,"dataGaLocation":45},"/solutions/gitops/","gitops",{"text":138,"config":139},"Enterprise",{"href":140,"dataGaName":141,"dataGaLocation":45},"/enterprise/","enterprise",{"text":143,"config":144},"Small business",{"href":145,"dataGaName":146,"dataGaLocation":45},"/small-business/","small business",{"text":148,"config":149},"Public sector",{"href":150,"dataGaName":151,"dataGaLocation":45},"/solutions/public-sector/","public sector",{"text":153,"config":154},"Education",{"href":155,"dataGaName":156,"dataGaLocation":45},"/solutions/education/","education",{"text":158,"config":159},"Financial services",{"href":160,"dataGaName":161,"dataGaLocation":45},"/solutions/finance/","financial services",{"title":163,"links":164},"Resources",[165,170,175,180,185,190,195,200,205,210,215,220,225],{"text":166,"config":167},"Install",{"href":168,"dataGaName":169,"dataGaLocation":45},"/install/","install",{"text":171,"config":172},"Quick start guides",{"href":173,"dataGaName":174,"dataGaLocation":45},"/get-started/","quick setup checklists",{"text":176,"config":177},"Learn",{"href":178,"dataGaName":179,"dataGaLocation":45},"https://university.gitlab.com/","learn",{"text":181,"config":182},"Product 
documentation",{"href":183,"dataGaName":184,"dataGaLocation":45},"https://docs.gitlab.com/","docs",{"text":186,"config":187},"Blog",{"href":188,"dataGaName":189,"dataGaLocation":45},"/blog/","blog",{"text":191,"config":192},"Customer success stories",{"href":193,"dataGaName":194,"dataGaLocation":45},"/customers/","customer success stories",{"text":196,"config":197},"Remote",{"href":198,"dataGaName":199,"dataGaLocation":45},"https://handbook.gitlab.com/handbook/company/culture/all-remote/","remote",{"text":201,"config":202},"GitLab Services",{"href":203,"dataGaName":204,"dataGaLocation":45},"/services/","services",{"text":206,"config":207},"TeamOps",{"href":208,"dataGaName":209,"dataGaLocation":45},"/teamops/","teamops",{"text":211,"config":212},"Community",{"href":213,"dataGaName":214,"dataGaLocation":45},"/community/","community",{"text":216,"config":217},"Forum",{"href":218,"dataGaName":219,"dataGaLocation":45},"https://forum.gitlab.com/","forum",{"text":221,"config":222},"Events",{"href":223,"dataGaName":224,"dataGaLocation":45},"/events/","events",{"text":226,"config":227},"Partners",{"href":228,"dataGaName":229,"dataGaLocation":45},"/partners/","partners",{"title":231,"links":232},"Company",[233,238,243,248,253,258,263,267,272,277,282,287],{"text":234,"config":235},"About",{"href":236,"dataGaName":237,"dataGaLocation":45},"/company/","company",{"text":239,"config":240},"Jobs",{"href":241,"dataGaName":242,"dataGaLocation":45},"/jobs/","jobs",{"text":244,"config":245},"Leadership",{"href":246,"dataGaName":247,"dataGaLocation":45},"/company/team/e-group/","leadership",{"text":249,"config":250},"Team",{"href":251,"dataGaName":252,"dataGaLocation":45},"/company/team/","team",{"text":254,"config":255},"Handbook",{"href":256,"dataGaName":257,"dataGaLocation":45},"https://handbook.gitlab.com/","handbook",{"text":259,"config":260},"Investor relations",{"href":261,"dataGaName":262,"dataGaLocation":45},"https://ir.gitlab.com/","investor 
relations",{"text":264,"config":265},"Sustainability",{"href":266,"dataGaName":264,"dataGaLocation":45},"/sustainability/",{"text":268,"config":269},"Diversity, inclusion and belonging (DIB)",{"href":270,"dataGaName":271,"dataGaLocation":45},"/diversity-inclusion-belonging/","Diversity, inclusion and belonging",{"text":273,"config":274},"Trust Center",{"href":275,"dataGaName":276,"dataGaLocation":45},"/security/","trust center",{"text":278,"config":279},"Newsletter",{"href":280,"dataGaName":281,"dataGaLocation":45},"/company/contact/","newsletter",{"text":283,"config":284},"Press",{"href":285,"dataGaName":286,"dataGaLocation":45},"/press/","press",{"text":288,"config":289},"Modern Slavery Transparency Statement",{"href":290,"dataGaName":291,"dataGaLocation":45},"https://handbook.gitlab.com/handbook/legal/modern-slavery-act-transparency-statement/","modern slavery transparency statement",{"title":293,"links":294},"Contact Us",[295,300,305,310,315,320,325],{"text":296,"config":297},"Contact an expert",{"href":298,"dataGaName":299,"dataGaLocation":45},"/sales/","sales",{"text":301,"config":302},"Get help",{"href":303,"dataGaName":304,"dataGaLocation":45},"/support/","get help",{"text":306,"config":307},"Customer portal",{"href":308,"dataGaName":309,"dataGaLocation":45},"https://customers.gitlab.com/customers/sign_in/","customer portal",{"text":311,"config":312},"Status",{"href":313,"dataGaName":314,"dataGaLocation":45},"https://status.gitlab.com/","status",{"text":316,"config":317},"Terms of use",{"href":318,"dataGaName":319,"dataGaLocation":45},"/terms/","terms of use",{"text":321,"config":322},"Privacy statement",{"href":323,"dataGaName":324,"dataGaLocation":45},"/privacy/","privacy statement",{"text":326,"config":327},"Cookie preferences",{"dataGaName":328,"dataGaLocation":45,"id":329,"isOneTrustButton":330},"cookie 
preferences","ot-sdk-btn",true,{"items":332},[333,335,337],{"text":316,"config":334},{"href":318,"dataGaName":319,"dataGaLocation":45},{"text":321,"config":336},{"href":323,"dataGaName":324,"dataGaLocation":45},{"text":326,"config":338},{"dataGaName":328,"dataGaLocation":45,"id":329,"isOneTrustButton":330},"content:shared:en-us:main-footer.yml","Main Footer","shared/en-us/main-footer.yml","shared/en-us/main-footer",{"_path":344,"_dir":9,"_draft":6,"_partial":6,"_locale":7,"logo":345,"subscribeLink":350,"navItems":354,"_id":366,"_type":31,"title":367,"_source":32,"_file":368,"_stem":369,"_extension":35},"/shared/en-us/the-source/navigation",{"altText":346,"config":347},"the source logo",{"src":348,"href":349},"https://res.cloudinary.com/about-gitlab-com/image/upload/v1750191004/t7wz1klfb2kxkezksv9t.svg","/the-source/",{"text":351,"config":352},"Subscribe",{"href":353},"#subscribe",[355,359,362],{"text":356,"config":357},"Artificial Intelligence",{"href":358},"/the-source/ai/",{"text":98,"config":360},{"href":361},"/the-source/security/",{"text":363,"config":364},"Platform & Infrastructure",{"href":365},"/the-source/platform/","content:shared:en-us:the-source:navigation.yml","Navigation","shared/en-us/the-source/navigation.yml","shared/en-us/the-source/navigation",{"_path":371,"_dir":9,"_draft":6,"_partial":6,"_locale":7,"title":372,"description":373,"submitMessage":374,"formData":375,"_id":379,"_type":31,"_source":32,"_file":380,"_stem":381,"_extension":35},"/shared/en-us/the-source/newsletter","The Source Newsletter","Stay updated with insights for the future of software development.","You have successfully signed up for The Source’s 
newsletter.",{"config":376},{"formId":377,"formName":378,"hideRequiredLabel":330},1077,"thesourcenewsletter","content:shared:en-us:the-source:newsletter.yml","shared/en-us/the-source/newsletter.yml","shared/en-us/the-source/newsletter",{"_path":383,"_dir":20,"_draft":6,"_partial":6,"_locale":7,"slug":384,"type":385,"category":20,"config":386,"seo":391,"content":395,"_id":420,"_type":31,"title":7,"_source":32,"_file":421,"_stem":422,"_extension":35},"/en-us/the-source/ai/self-hosted-ai-balance-innovation-and-security-in-government","self-hosted-ai-balance-innovation-and-security-in-government","article",{"layout":9,"template":387,"featured":330,"articleType":388,"author":389,"gatedAsset":390},"TheSourceArticle","Regular","bob-stevens","source-lp-ai-for-air-gapped-environments",{"title":392,"description":393,"ogImage":394},"Self-hosted AI: Balance innovation & security in government","Discover how self-hosted models enable federal agencies to leverage artificial intelligence while maintaining strict security and compliance standards.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1752687563/vda4ouljcsv1z63bvs2p.png",{"title":396,"description":393,"date":397,"timeToRead":398,"keyTakeaways":399,"articleBody":403,"faq":404,"heroImage":394},"Self-hosted AI: Balance innovation and security in government","2025-07-22","5 min read",[400,401,402],"Self-hosted models allow federal agencies to use artificial intelligence while keeping sensitive data within secure, controlled environments that meet strict compliance requirements.","Government organizations gain better security, cost control, and custom solutions by running AI models on their own infrastructure rather than using cloud-based services.","Military branches like the Army, Air Force, and Defense Information Systems Agency are already deploying self-hosted AI tools for mission-critical operations.","Government agencies face strict rules that prevent them from using cloud technology for software 
development. This blocks their access to AI's transformative potential because most advanced AI solutions run in the cloud. The risks of sending data outside their networks and losing control over AI environments force them to find a more secure path.\n\nDespite these challenges, ignoring AI entirely isn't realistic. Agencies must integrate AI into software development to support [efficient software modernization](https://about.gitlab.com/the-source/ai/reducing-software-development-complexity-with-ai/). But how can they use AI tools to enhance productivity, improve security, and drive innovation without exposing themselves to the risks associated with cloud-based AI solutions?\n\nSelf-hosted AI models provide a strategic solution. By running and managing large language models (LLMs) and other advanced AI capabilities within their own secure infrastructure, whether in on-premises data centers or private cloud environments, agencies gain the control needed to leverage AI while maintaining strict compliance standards and advancing mission-critical applications.\n\n## Key benefits of a self-hosted AI strategy\n\nAfter working with federal agency tech leaders for many years, I understand that a statement like \"Let's just host it ourselves\" might raise some eyebrows. It's not always straightforward, especially with a technology as new as AI. 
However, evidence suggests that federal agencies and defense organizations are ready for a different approach.\n\nFor example, [the Pentagon is actively working on a \"fast pass\" approach](https://federalnewsnetwork.com/defense-news/2025/04/pentagon-to-establish-secure-software-assurance-program/) to securing software components, aiming to onboard approved software more quickly by using existing standards such as [Software Bill of Materials (SBOM)](https://about.gitlab.com/the-source/security/guide-to-dynamic-sboms/), the NIST Secure Software Development Framework (SSDF), and other common attestation methods and [risk assessments](https://about.gitlab.com/the-source/security/embedding-risk-intelligence-into-your-software-supply-chain/).\n\nMeanwhile, the House Oversight and Government Reform Committee has been exploring ways to use IT modernization to enhance efficiency. And there's a broad groundswell of interest in finding ways to leverage AI in government.\n\nHere are several examples from the U.S. 
military:\n\n- The Defense Information Systems Agency is developing a [new data strategy](https://www.linkedin.com/pulse/disa-outlines-blueprint-new-data-strategy-u4jfc/?trackingId=hNpbXWugSH%2BukncYhngytA%3D%3D) that integrates data, analytics, and AI into all aspects of defense operations through a secure, self-hosted platform.\n- The Army is building [two new self-hosted AI tools](https://www.army.mil/article/283601/enhancing_military_operational_effectiveness_through_the_integration_of_camo_and_nipr_gpt), CamoGPT and NIPR GPT, to support predictive maintenance, analysis of adversaries' communications, logistics optimization, and evaluation of different proposed courses of action.\n- The Air Force Research Lab is developing an open-source platform, the [Air and Space Force Cognitive Engine](https://afresearchlab.com/technology/air-and-space-force-cognitive-engine/), a flexible, single IT platform for operationalizing AI within the Air Force.\n\nGovernment organizations see clear advantages when they host LLMs within their own secure infrastructure:\n- **Data sovereignty**: When working with sensitive national security information, the risks of external data processing and limited control over AI environments demand a more secure approach that keeps critical data within protected boundaries. Self-hosted environments ensure that level of security.\n- **Regulatory compliance**: Federal agencies must adhere to complex regulatory frameworks, including the Federal Risk and Authorization Management Program (FedRAMP), International Traffic in Arms Regulation (ITAR), Federal Information Security Modernization Act (FISMA), and agency-specific mandates. 
Self-hosted environments provide the detailed control necessary to implement specific security controls, audit trails, and governance frameworks that meet these strict requirements.\n- **Better security**: Self-hosted models dramatically reduce potential attack vectors by removing dependencies on external APIs and third-party infrastructure. Agencies maintain complete control over access management, network segmentation, and vulnerability patching within their AI systems.\n- **Custom solutions**: Unlike standard cloud solutions, agencies can choose from a list of supported AI models using specialized datasets tailored to their unique use cases and environments. This enables the development of more effective, purpose-built AI solutions that directly support mission objectives, whether by enhancing intelligence analysis, optimizing resources, or strengthening cybersecurity. This customization also facilitates [integration with legacy systems](https://about.gitlab.com/the-source/security/why-legacy-code-is-a-security-risk-and-how-ai-can-help/), a common challenge in the public sector.\n- **Cost control**: While the initial setup requires an investment in infrastructure and expertise, self-hosted AI models can provide more predictable long-term cost structures compared to variable subscription-based cloud models. This approach offers greater flexibility for large-scale deployments, leveraging existing infrastructure and personnel. Plus, self-hosted AI can offer a secure environment for modernizing legacy systems while maintaining direct oversight of sensitive code.\n\n## Fostering innovation within a trusted framework\n\nRunning AI in a secure, self-hosted environment supports innovation within a foundation of trust and control. Agencies can adopt open-source AI advances while maintaining security, compliance, and performance standards. 
This flexibility allows government developers and data scientists to build critical applications with security and compliance as foundational principles rather than afterthoughts.\n\nThe examples above clearly demonstrate that the U.S. government — particularly the Department of Defense — is serious about embracing the potential of AI to make their work more effective, efficient, and innovative. This movement is already well underway.\n\nFor federal agencies, integrating self-hosted AI models into software development workflows is essential for managing the complex web of security regulations while fostering innovation. Self-hosting allows AI to reach its full potential throughout the software development lifecycle. This enhances operational effectiveness, strengthens security, and accelerates the creation of more intelligent applications to safeguard national interests in an increasingly complex digital environment.",[405,408,411,414,417],{"header":406,"content":407},"What is self-hosted AI and how does it work for government agencies?","Self-hosted AI involves running large language models and AI capabilities within an agency's own secure infrastructure, either on-premises or in private cloud environments. This approach allows agencies to leverage AI tools while maintaining complete control over sensitive data and meeting strict compliance requirements.",{"header":409,"content":410},"Which military branches are currently using self-hosted AI tools?","The Army is building CamoGPT and NIPR GPT for predictive maintenance and logistics optimization. The Air Force Research Lab is developing the Air and Space Force Cognitive Engine platform. The Defense Information Systems Agency is integrating AI into defense operations via secure, self-hosted platforms.",{"header":412,"content":413},"What compliance standards must government self-hosted AI meet?","Government self-hosted AI must comply with FedRAMP, ITAR, FISMA, and agency-specific mandates. 
Self-hosted environments provide the detailed control needed for specific security controls, audit trails, and governance frameworks that meet these strict regulatory requirements.",{"header":415,"content":416},"How does self-hosted AI reduce security risks compared to cloud-based solutions?","Self-hosted AI dramatically reduces attack vectors by eliminating dependencies on external APIs and third-party infrastructure. Agencies maintain complete control over access management, network segmentation, and vulnerability patching within their AI systems, keeping sensitive data within protected boundaries.",{"header":418,"content":419},"What are the cost advantages of self-hosted AI for government agencies?","Self-hosted AI provides more predictable long-term cost structures compared to variable subscription-based cloud models. While requiring initial infrastructure investment, this approach offers greater flexibility for large-scale deployments and leverages existing government infrastructure and personnel resources.","content:en-us:the-source:ai:self-hosted-ai-balance-innovation-and-security-in-government:index.yml","en-us/the-source/ai/self-hosted-ai-balance-innovation-and-security-in-government/index.yml","en-us/the-source/ai/self-hosted-ai-balance-innovation-and-security-in-government/index",{"_path":424,"_dir":9,"_draft":6,"_partial":6,"_locale":7,"type":425,"config":426,"seo":427,"content":430,"slug":20,"_id":444,"_type":31,"title":7,"_source":32,"_file":445,"_stem":446,"_extension":35},"/en-us/the-source/ai","category",{"layout":9},{"title":356,"description":428,"ogImage":429},"Explore expert insights on how AI is transforming software development, and how organizations can get the most out of their AI 
investments.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1751463300/eoudcbj5aoucl0spsp0c.png",[431,436],{"componentName":432,"type":432,"componentContent":433},"TheSourceCategoryHero",{"title":356,"description":428,"image":434},{"config":435},{"src":429},{"componentName":437,"type":437,"componentContent":438},"TheSourceCategoryMainSection",{"config":439},{"gatedAssets":440},[441,442,443],"source-lp-how-to-get-started-using-ai-in-software-development","navigating-ai-maturity-in-devsecops","source-lp-ai-guide-for-enterprise-leaders-building-the-right-approach","content:en-us:the-source:ai:index.yml","en-us/the-source/ai/index.yml","en-us/the-source/ai/index",{"_path":448,"_dir":9,"_draft":6,"_partial":6,"_locale":7,"type":425,"config":449,"seo":450,"content":453,"slug":24,"_id":465,"_type":31,"title":7,"_source":32,"_file":466,"_stem":467,"_extension":35},"/en-us/the-source/security",{"layout":9},{"title":98,"description":451,"ogImage":452},"Get up to speed on how organizations can ensure they're staying on top of evolving security threats and compliance requirements.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1751463273/aplkxrvwpii26xao5yhi.png",[454,458],{"componentName":432,"type":432,"componentContent":455},{"title":98,"description":451,"image":456},{"config":457},{"src":452},{"componentName":437,"type":437,"componentContent":459},{"config":460},{"gatedAssets":461},[462,463,464],"source-lp-guide-to-dynamic-sboms","source-lp-devsecops-the-key-to-modern-security-resilience","application-security-in-the-digital-age","content:en-us:the-source:security:index.yml","en-us/the-source/security/index.yml","en-us/the-source/security/index",{"_path":469,"_dir":9,"_draft":6,"_partial":6,"_locale":7,"type":425,"config":470,"seo":471,"content":474,"slug":29,"_id":486,"_type":31,"title":7,"_source":32,"_file":487,"_stem":488,"_extension":35},"/en-us/the-source/platform",{"layout":9},{"title":363,"description":472,"ogImage":473},"Learn how to 
build a DevSecOps framework that sets your team up for success, from planning to delivery.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1751463263/bdz7hmhpbmgwvoybcaud.png",[475,479],{"componentName":432,"type":432,"componentContent":476},{"title":363,"description":472,"image":477},{"config":478},{"src":473},{"componentName":437,"type":437,"componentContent":480},{"config":481},{"gatedAssets":482},[483,484,485],"source-lp-the-ultimate-playbook-for-high-performing-devsecops-teams","source-lp-measuring-success-in-software-development-a-guide-for-leaders","source-lp-building-a-resilient-software-development-practice","content:en-us:the-source:platform:index.yml","en-us/the-source/platform/index.yml","en-us/the-source/platform/index",{"amanda-rueda":490,"andre-michael-braun":491,"andrew-haschka":492,"ayoub-fandi":493,"bob-stevens":494,"brian-wald":495,"bryan-ross":496,"chandler-gibbons":497,"dave-steer":498,"ddesanto":499,"derek-debellis":500,"emilio-salvador":501,"erika-feldman":502,"george-kichukov":503,"gitlab":504,"grant-hickman":505,"haim-snir":506,"iganbaruch":507,"jlongo":508,"joel-krooswyk":509,"josh-lemos":510,"julie-griffin":511,"kristina-weis":512,"lee-faus":513,"ncregan":514,"rschulman":515,"sabrina-farmer":516,"sandra-gittlen":517,"sharon-gaudin":518,"stephen-walters":519,"taylor-mccaslin":520},"Amanda Rueda","Andre Michael Braun","Andrew Haschka","Ayoub Fandi","Bob Stevens","Brian Wald","Bryan Ross","Chandler Gibbons","Dave Steer","David DeSanto","Derek DeBellis","Emilio Salvador","Erika Feldman","George Kichukov","GitLab","Grant Hickman","Haim Snir","Itzik Gan Baruch","Joseph Longo","Joel Krooswyk","Josh Lemos","Julie Griffin","Kristina Weis","Lee Faus","Niall Cregan","Robin Schulman","Sabrina Farmer","Sandra Gittlen","Sharon Gaudin","Stephen Walters","Taylor 
McCaslin",{"amanda-rueda":490,"andre-michael-braun":491,"andrew-haschka":492,"ayoub-fandi":493,"bob-stevens":494,"brian-wald":495,"bryan-ross":496,"chandler-gibbons":497,"dave-steer":498,"ddesanto":499,"derek-debellis":500,"emilio-salvador":501,"erika-feldman":502,"george-kichukov":503,"gitlab":504,"grant-hickman":505,"haim-snir":506,"iganbaruch":507,"jlongo":508,"joel-krooswyk":509,"josh-lemos":510,"julie-griffin":511,"kristina-weis":512,"lee-faus":513,"ncregan":514,"rschulman":515,"sabrina-farmer":516,"sandra-gittlen":517,"sharon-gaudin":518,"stephen-walters":519,"taylor-mccaslin":520},[523,560,596],{"_path":524,"_dir":29,"_draft":6,"_partial":6,"_locale":7,"slug":525,"type":385,"category":29,"config":526,"seo":529,"content":533,"_id":557,"_type":31,"title":7,"_source":32,"_file":558,"_stem":559,"_extension":35},"/en-us/the-source/platform/beyond-the-portal-hype-why-you-need-a-platform-first","beyond-the-portal-hype-why-you-need-a-platform-first",{"layout":9,"template":387,"featured":330,"articleType":388,"author":527,"gatedAsset":528},"bryan-ross","source-lp-how-to-build-a-resilient-software-development-practice",{"title":530,"ogTitle":530,"description":531,"ogDescription":531,"ogImage":532},"Beyond the portal hype: Why you need a platform first","Discover why many internal developer portals fall short and why a platform-first approach is key to improving developer productivity.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1752086082/z2udikxenysukvroywvb.png",{"title":530,"description":531,"date":534,"timeToRead":535,"heroImage":532,"keyTakeaways":536,"articleBody":540,"faq":541},"2025-07-15","6 min read",[537,538,539],"Most portal initiatives struggle with adoption because organizations underestimate the product management effort required for successful implementation and ongoing maintenance.","Start by building a robust platform with streamlined workflows and automation before investing in a portal interface; the value of any portal is entirely 
dependent on the capabilities of the underlying platform.","Consider whether tool consolidation might be more effective than integration; end-to-end solutions can simplify your ecosystem and reduce the need for the complex integrations that portals attempt to solve.","When Spotify released Backstage as an open source project in 2020, it sparked a wave of enthusiasm across the platform engineering community. The promise was compelling: a unified dashboard where developers could discover, access, and consume everything they needed to build software efficiently. Who wouldn't want a sleek “shop front” to simplify the increasingly complex world of software development?\n\nFast forward to today, and the reality has proven more complicated. Despite the initial excitement, many organizations struggle to realize the promised benefits of internal developer portals. \n\n## Portals vs. platforms: What’s the difference?\nAn internal developer portal is a “front door” to your technical ecosystem. It sits atop your developer platform, which integrates different tools to provide standardized workflows and underlying infrastructure and helps enforce governance. While the platform handles the technical implementation of tooling and automation, the portal provides a single pane of glass that makes development resources discoverable and accessible.\n\nBefore we get to the challenges around portals, it’s worth acknowledging the very real challenges they aim to address:\n1. **Discovery obstacles**: Many organizations lack an API catalog, causing developers to struggle to find existing software components, documentation, best practices, and support channels. Portals attempt to solve this by creating a centralized catalog where developers can access these resources through a unified search and navigation experience.\n1. **Tool sprawl**: The modern software development lifecycle relies on numerous specialized tools, each with its own interface and learning curve. 
[GitLab research](https://about.gitlab.com/developer-survey/) found that 62% of teams use six or more separate tools for software development. Portals address this by integrating these disparate tools behind a consistent interface, reducing the cognitive load of context switching.\n1. **Siloed knowledge**: Teams focused on their specific challenges often create their own workflows and toolchains, hampering cross-team collaboration and leading to duplicated work. Portals aim to break down these silos by making team assets visible across the organization and promoting standardized workflows that encourage collaboration and reuse of existing solutions.\nThese challenges have a measurable business impact: According to the [2024 GitLab Global DevSecOps Report](https://about.gitlab.com/developer-survey/), 78% of developers spend at least a quarter of their time maintaining and integrating toolchains.\n\n## Why portal initiatives often fall short\nIf internal developer portals address genuine business problems, why do these initiatives regularly fail to gain traction? In my conversations with technical leaders at companies of all sizes, I’ve noticed several key factors:\n1. **Insufficient product management**: Many organizations underinvest in release announcements, internal enablement examples, training, and other adoption-fueling activities essential for portal success.\n1. **Dependency on platform capabilities**: A portal is only as valuable as its underlying platform. Without robust platform capabilities, a portal merely presents a unified view of dysfunction.\n1. **Technical complexity**: Organizations often underestimate that a portal is not simply a tool to install but a software development framework requiring significant engineering skills to build and maintain.\n1. **Ongoing investment requirements**: Building and maintaining a portal demands substantial continuous investment, which many organizations underestimate during initial planning stages.\n1. 
**Limited developer resonance**: Despite being highly discussed in platform engineering circles, a recent CNCF App Development Working Group survey revealed that many developers remain unaware of Backstage — suggesting it may not address problems developers consider material to their work.\n\nThese challenges are particularly acute when building the portal’s frontend interface. A portal essentially functions as a wrapper built around existing tools, aiming to become the single source of truth for developer interactions.\n\nBut here's the catch: If your portal doesn't mirror enough of the functionality of those underlying tools, developers will bypass it and go straight to the underlying tools, making your portal just another item in an already crowded toolchain. At the same time, trying to keep up with feature changes across a dozen backend tools requires a massive ongoing effort. Every time a backend system changes or releases a new capability, the portal team faces the same question: implement, integrate, or ignore?  Providing a single pane of glass is a significant, perpetual engineering investment that most organizations underestimate.\n\n[Netflix, which has deep experience in developer tooling, puts it bluntly](https://www.youtube.com/watch?v=qgFyb28NvlQ): “A common front door for existing tools is insufficient on its own to attract and keep a user base. Rather [it] needs end-to-end experiences not available in other tools to keep users coming back and discovering the additional features and capabilities.”\n\n## The platform-first approach\nOrganizations that have successfully improved developer productivity typically follow a platform-first approach rather than beginning with a portal. Here’s what this looks like in practice:\n1. **Start with developer needs**: Don’t assume what developers need. Speak directly with teams about their challenges and work closely with them to develop solutions that demonstrably improve their day-to-day experiences.\n1. 
**Focus on platform capabilities first**: Prioritize creating streamlined, automated workflows for regular tasks that incorporate best practices and corporate standards. Any future portal's value will entirely depend on these underlying capabilities.\n1. **Consider tool consolidation before integration**: Portals primarily solve integration issues between tools by abstracting authentication methods and bringing data sources together. Before investing in complex integrations, evaluate whether consolidating tools might simplify your ecosystem. End-to-end solutions across the software development lifecycle can reduce the need for extensive integration work.\n1. **Invest in product management**: Ensure strong product management to encourage platform adoption by new teams and drive new capability adoption by teams who have already embraced the platform.\n\n## When portals make sense\nThis isn’t to say that internal developer portals are inherently flawed. In fact, I’ve worked with several large, mature organizations that successfully use internal developer portals like Backstage, but with a crucial difference in approach and expectations.\n\nOne large financial institution I worked with recently has had tremendous feedback from their portal implementation. Rather than trying to create a single pane of glass for all development activities, their portal was built to serve two specific workflows: developer onboarding and new project scaffolding. When a developer joins a team, the portal guides them through account setup across six different systems, automatically provisioning access based on their team assignment. For new projects, the portal provides developers with an intuitive interface to select an appropriate template and configure it to their needs. 
The portal then triggers the necessary backend systems to build the required project scaffolding, including an initial code repository and a CI/CD pipeline with [policy-driven testing](https://about.gitlab.com/blog/how-to-use-gitlabs-custom-compliance-frameworks-in-your-devsecops/) and [infrastructure-as-code](https://about.gitlab.com/blog/using-ansible-and-gitlab-as-infrastructure-for-code/) to deploy the application.\n\nSuccessful implementations like this leverage portals for activities that genuinely benefit from a simplified point-and-click interface. The portal doesn't try to be the primary interface for all activity; developers still work directly in their IDEs, Git repositories, and monitoring dashboards.\nCritically, organizations with successful developer portals build solid, capable internal developer platforms first. They also have mature approaches to gathering developer feedback to direct their efforts to real-world points of friction.\n\n## The path forward\nThe message for technical leaders navigating the platform engineering landscape is clear: Start with a strong platform rather than focusing primarily on a portal. Prioritize creating tangible value for developers through automation, standardization, and simplified workflows. Once your platform capabilities mature and deliver measurable benefits, consider adding a portal as an enhancement if specific needs warrant it.\n\nBy taking this measured approach, you'll avoid the common pitfall of implementing a beautiful dashboard that sits atop dysfunction — and instead build developer tooling that genuinely improves productivity, reduces cognitive load, and accelerates innovation.",[542,545,548,551,554],{"header":543,"content":544},"What's the difference between an internal developer portal and a platform?","An internal developer portal is a \"front door\" interface that sits atop your developer platform. 
The platform handles technical implementation, tooling, and automation with standardized workflows, while the portal provides a single pane of glass that makes development resources discoverable and accessible.",{"header":546,"content":547},"How much time do developers spend on toolchain maintenance and integration?","According to the 2024 GitLab Global DevSecOps Report, 78% of developers spend at least a quarter of their time maintaining and integrating toolchains. GitLab research also found that 62% of teams use six or more separate tools for software development.",{"header":549,"content":550},"Why do internal developer portal initiatives often fail?","Portal initiatives fail due to insufficient product management, dependency on weak platform capabilities, underestimated technical complexity, ongoing investment requirements, and limited developer resonance. Many organizations underestimate that portals require significant continuous engineering investment to maintain feature parity with underlying tools.",{"header":552,"content":553},"What should organizations prioritize before building a developer portal?","Organizations should follow a platform-first approach: start with developer needs assessment, focus on platform capabilities with streamlined automated workflows, consider tool consolidation before integration, and invest in strong product management for adoption. Build robust platform capabilities before adding portal interfaces.",{"header":555,"content":556},"When do internal developer portals make sense to implement?","Portals work best for specific workflows like developer onboarding and new project scaffolding rather than trying to be a single pane of glass for all activities. 
Successful implementations focus on activities that genuinely benefit from simplified point-and-click interfaces while developers continue using specialized tools directly.","content:en-us:the-source:platform:beyond-the-portal-hype-why-you-need-a-platform-first:index.yml","en-us/the-source/platform/beyond-the-portal-hype-why-you-need-a-platform-first/index.yml","en-us/the-source/platform/beyond-the-portal-hype-why-you-need-a-platform-first/index",{"_path":561,"_dir":20,"_draft":6,"_partial":6,"_locale":7,"slug":562,"type":385,"category":20,"config":563,"seo":565,"content":569,"_id":593,"_type":31,"title":7,"_source":32,"_file":594,"_stem":595,"_extension":35},"/en-us/the-source/ai/three-ways-to-operationalize-ai-for-engineering-teams","three-ways-to-operationalize-ai-for-engineering-teams",{"layout":9,"template":387,"featured":330,"articleType":388,"author":564,"gatedAsset":441},"sabrina-farmer",{"title":566,"description":567,"ogImage":568},"Three ways to operationalize AI for engineering teams","Discover three actionable frameworks for engineering leaders to implement AI strategically, drive measurable ROI, and overcome adoption barriers.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1751908411/i1mwfh3egxgbx5ijkowi.png",{"title":566,"description":567,"date":570,"timeToRead":571,"keyTakeaways":572,"articleBody":576,"faq":577,"heroImage":568},"2025-07-08","4 min read",[573,574,575],"AI adoption succeeds when positioned as a collaborative development partner — similar to pair programming — with specific applications like enhanced debugging, solution architecture, and code quality assurance rather than a replacement for engineers.","Strategic AI implementation requires role-specific applications with clear ROI targets, seamless workflow integration that minimizes friction, and structured feedback loops that connect AI initiatives directly to business outcomes.","Incremental implementation victories, rather than wholesale transformation, drive successful 
AI adoption — with success measured through problem-solving effectiveness and business impact instead of traditional productivity metrics.","Technical leaders face mounting pressure to adopt AI tools, but many struggle to move beyond experimentation to systematic implementation that delivers measurable ROI. While AI's potential for software development is clear, the path to operationalization remains challenging.\n\n[GitLab research](https://about.gitlab.com/developer-survey/2024/ai/) reveals that approximately half of organizations are still in the evaluation and exploration stage of AI maturity. These teams recognize AI's potential but haven't crystallized their implementation strategy, a common challenge I've observed when speaking with engineering executives.\n\n## Breaking through implementation barriers\n\nTwo critical obstacles stand in the way of successful AI adoption. First is the fear that AI will replace human engineers — a legitimate concern requiring transparent communication from leadership. Second is the challenge of determining where to begin implementing AI when many engineers see limited value in disrupting established workflows.\n\nTechnical leaders must reframe AI’s value proposition by connecting AI capabilities directly to business outcomes. [Success metrics](https://about.gitlab.com/the-source/ai/4-steps-for-measuring-the-impact-of-ai/) should focus on problem-solving effectiveness and business impact rather than code volume or traditional individual productivity measures.\n\nRather than viewing AI as a threat to jobs, help your teams consider it through the lens of established collaborative practices like pair programming. 
This familiar framework provides clear entry points for AI integration:\n\n* **Enhanced debugging partner**: AI functions as a sophisticated \"[rubber duck](https://rubberduckdebugging.com/)\" that not only listens but responds with actionable insights\n* **Solution architect**: AI can generate multiple implementation approaches to complex problems within seconds\n* **Code quality guardian**: AI can help teams identify optimization opportunities and vulnerabilities before human review\n\nWhen positioned as an augmentation layer that eliminates repetitive tasks and amplifies human creativity, AI becomes an enabler rather than a threat.\n\n## A three-step implementation framework for technical leaders\n\nTo integrate AI into team workflows, leadership must first establish the context and then take a top-down approach to implementation. Specifically, leaders must define how teams will use AI, establish clear processes, and provide the necessary resources and support. Rather than overhauling your team's existing workflows entirely, apply AI to specific tasks or stages of the development process. This iterative approach allows teams to learn, adapt, and build confidence in AI over time.\n\n### 1. Define role-specific AI applications with clear ROI\n\nInstead of vague directives, specify exactly how different roles will leverage AI:\n\n* **Developers**: Ensure a consistent and thorough initial analysis and mandate AI-powered first code reviews and security scans before your human review. Leveraging AI first to analyze code for potential bugs, vulnerabilities, and performance issues can provide developers with actionable insights for remediation, while also creating learning moments.\n* **Quality assurance (QA) engineers**: Use AI to generate the first test for new code and analyze test results, freeing developers to focus on more complex testing scenarios and critical issues. 
Editing a proposed test is typically easier than generating it from scratch.\n* **Operations teams**: Implement AI to automate repetitive operational tasks such as deployments and infrastructure management and monitoring to free up operations teams' time for more strategic work.\n* **Team leads**: Leverage AI to assist with project planning, backlog prioritization, resource allocation, initial triage, and progress tracking, providing team leads with real-time insights into project health and potential risks.\n* **Product managers**: Use AI to analyze and summarize customer verticals, market trends, customer forums, and overall customer sentiment.\n\n### 2. Integrate AI seamlessly into existing workflows\n\nSelect AI solutions that seamlessly integrate into your existing development environment to avoid additional burdens on your developers. To avoid decision fatigue, develop clear guidelines for when and how to use AI tools, including:\n\n* When to rely on AI-generated suggestions\n* How to critically evaluate AI recommendations\n* What feedback mechanisms exist for improving AI outputs\n\n### 3. Create feedback loops and measure business impact\n\nEstablish structured communication channels for engineers to share AI wins and challenges. Create internal communities of practice around AI integration to accelerate knowledge sharing. Encourage developers to interact with the AI, provide feedback on generated code, refine test cases, and actively participate in the collaborative process.\n\nAfter implementation, quantify and communicate the business impact to executive stakeholders. It’s important to position AI not as experimental technology but as a strategic lever for competitive advantage and engineering excellence.\n\n## Moving beyond experimentation\n\nThe key to successful AI operationalization is targeted implementation with clear business objectives. 
By defining role-specific applications, creating seamless integration points, and establishing feedback mechanisms, engineering leaders can transform AI from an interesting curiosity to a foundational productivity multiplier.\n\nSuccess will not come from wholesale workflow transformation but through incremental victories demonstrating tangible value. With this structured approach, technical leaders can unlock AI's true potential while ensuring their teams feel empowered rather than threatened by this technological evolution.",[578,581,584,587,590],{"header":579,"content":580},"What percentage of organizations are still evaluating AI implementation?","Approximately half of organizations remain in the evaluation and exploration stage of AI maturity. These teams recognize AI's potential but haven't crystallized their implementation strategy, creating a common challenge for engineering executives moving beyond experimentation.",{"header":582,"content":583},"How should engineering leaders position AI to overcome adoption resistance?","Leaders should reframe AI as a collaborative development partner similar to pair programming rather than a replacement. Position AI as an enhanced debugging partner, solution architect, and code quality guardian that eliminates repetitive tasks while amplifying human creativity.",{"header":585,"content":586},"What are the three key steps for implementing AI in engineering workflows?","First, define role-specific AI applications with clear ROI for developers, QA engineers, operations teams, team leads, and product managers. Second, integrate AI seamlessly into existing development environments. Third, create feedback loops and measure business impact through structured communication channels.",{"header":588,"content":589},"How should AI success be measured in engineering teams?","Success metrics should focus on problem-solving effectiveness and business impact rather than code volume or traditional productivity measures. 
Quantify business impact for executive stakeholders and position AI as a strategic lever for competitive advantage and engineering excellence.",{"header":591,"content":592},"What AI applications work best for different engineering roles?","Developers use AI for code reviews and security scans. QA engineers leverage AI for test generation and result analysis. Operations teams implement AI for deployments and infrastructure monitoring. Team leads use AI for project planning and progress tracking. Product managers apply AI for customer sentiment analysis.","content:en-us:the-source:ai:three-ways-to-operationalize-ai-for-engineering-teams:index.yml","en-us/the-source/ai/three-ways-to-operationalize-ai-for-engineering-teams/index.yml","en-us/the-source/ai/three-ways-to-operationalize-ai-for-engineering-teams/index",{"_path":597,"_dir":29,"_draft":6,"_partial":6,"_locale":7,"config":598,"seo":599,"content":603,"type":385,"slug":626,"category":29,"_id":627,"_type":31,"title":7,"_source":32,"_file":628,"_stem":629,"_extension":35},"/en-us/the-source/platform/transform-your-platform-onboarding-for-higher-adoption-rates",{"layout":9,"template":387,"articleType":388,"author":527,"featured":330,"gatedAsset":463},{"title":600,"description":601,"ogImage":602},"Transform your platform onboarding for higher adoption rates","Redesign your platform onboarding to boost adoption, reduce friction, and create seamless experiences for development teams.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1751463510/hm90bhwzptl1b2gwovhx.png",{"title":600,"date":604,"description":601,"timeToRead":571,"heroImage":602,"keyTakeaways":605,"articleBody":609,"faq":610},"2025-07-01",[606,607,608],"A weak onboarding experience can significantly impact platform adoption, with research showing that one-third of users consider abandoning platforms after poor experiences.","Simple improvements like creating an intuitive landing page, writing clear documentation, and automating access processes 
can dramatically increase user adoption and satisfaction.","Building effective support systems across multiple channels (chat, email, ticketing) creates trust and ensures users can quickly overcome obstacles during their onboarding journey.","In my work with platform teams across industries, from startups to enterprises, I’ve noticed a consistent blind spot: the onboarding experience. While teams focus intensely on building robust features, they often neglect how new users first encounter their platform — and this oversight can severely limit adoption.\n\nAccording to the [diffusion of innovations theory](https://en.wikipedia.org/wiki/Diffusion_of_innovations), most platforms achieve about 16% adoption before stagnating. That's because innovators and early adopters — representing about 16% of an organization — are often willing to tolerate rough edges, motivated by novelty or vision. The early majority, comprising 34%, is key to going mainstream. They prioritize proven reliability, a clear value proposition, and ease of use. This shift in expectations is the chasm where many platform teams stumble. Your early adopters might forgive a clunky onboarding process, but the early majority won’t.\n\n![Diffusion of Innovation](https://res.cloudinary.com/about-gitlab-com/image/upload/v1752176125/Blog/k6kxdtokv4laph4exsdt.png)\n\n## Start with a memorable, future-proof name\nThe platform's name is likely the first part of the platform that users will engage with. Choose something unique within your organization that’s easy to spell and not tied to specific technologies.\n\nEffective platform names often:\n\n**Reflect your value proposition** rather than the underlying technology. For example, try a name such as “Runway” that reflects the value proposition of helping teams launch faster instead of something more literal like “K8sPipeline.”\n\n**Use simple, memorable words** that evoke the platform’s purpose. Can someone easily understand and spell it after hearing it once? 
Choosing something simple and easy to remember, such as “Beacon,” will likely serve you better than a unique or creative option such as “Syzygy.”\n\nAvoid these common pitfalls:\n- **Version numbers in names** signal previous failures and raise doubts about longevity.\n- **Generic three-letter acronyms** become instantly forgettable in a sea of other TLAs.\n- **Technology-based names** suggest you prioritize tools over user needs.\n\n## Develop a multi-channel communication strategy\nEffective platform adoption requires deliberate communication planning across multiple channels, from a product website that clearly articulates your platform’s value proposition to user-centric documentation and email updates. Your communication strategy should also include a reliable health dashboard that gives users visibility into known issues and their resolution status. Remember that in enterprise environments, how you communicate about your platform often matters as much as the platform itself. Invest in communication with the same care you invest in your technical infrastructure.\n\n> [Learn more about building a comprehensive communication framework for platform engineering](https://about.gitlab.com/the-source/platform/building-a-communication-strategy-for-platform-engineering-teams/).\n\n## Simplify the access process\nTeams often spend months perfecting platform features while neglecting the most basic step: making it easy to access the platform.\n\nI’ve seen many examples of this at organizations of all sizes, across every industry. Common barriers include:\n\n**Manual onboarding processes** for supposedly self-service platforms. If you can’t fully automate the process, do your best to perform human-in-the-loop tasks asynchronously.\n\n**Time-consuming approval steps** or other barriers that delay initial exploration. One great solution to this is to offer immediate, temporary access to your platform for free for 30 days. 
This is long enough for someone to decide if your platform helps them and raise the necessary request to gain full access.\n\n**Mandatory training requirements** before users can begin. Training is valuable, but it should be required within a set period after joining the platform rather than being a prerequisite.\n\n## Don’t neglect design and tone\nFirst impressions are largely visual. An outdated or inconsistent interface can deter users even if your functionality is excellent. Pay attention to branding, color schemes, and the tone of your messaging. These details might seem trivial, but they set the tone for user engagement.\n\nAim for clear, human communication rather than technical jargon. A user-friendly tone makes your platform more approachable to diverse stakeholders.\n\n## Build responsive support systems\nEven the best platforms need support, and nothing builds trust faster than responsive help when users encounter problems. Your primary goal during support interactions should be minimizing user frustration.\n\nCreate an effective support framework by leveraging multiple channels:\n- **Support tickets** provide accountability and integration with other systems.\n- **Email communication** works well for complex topics requiring clarity.\n- **Chat systems** enable real-time problem-solving when users are “in the flow.”\n\nBe present where your users are, even if that means monitoring multiple communication tools. Aim to answer chat queries within 30-60 minutes, and always follow up publicly so others can benefit from solutions.\n\n## The path to successful platform adoption\nOrganizations that prioritize user experience from day one gain significant advantages in adoption rates and user satisfaction. 
By creating intuitive onboarding processes, clear documentation, and responsive support systems, you transform the user journey from frustration to delight.\n\nRemember that your platform users are making a critical decision: whether your solution deserves their time and trust. A thoughtful onboarding experience tells them you value that investment — and dramatically increases your chances of widespread adoption.",[611,614,617,620,623],{"header":612,"content":613},"Why is platform onboarding so important to user adoption?","Poor onboarding experiences are a leading cause of stalled platform adoption. Research shows that one-third of users consider abandoning platforms after a frustrating first encounter. A thoughtful, streamlined onboarding process helps build trust and accelerates user engagement.",{"header":615,"content":616},"What are the most common onboarding mistakes platform teams make?","Teams often over-engineer platform features while neglecting usability basics. Common mistakes include clunky access processes, mandatory training before usage, poor visual design, inconsistent messaging, and weak support channels, all of which discourage adoption.",{"header":618,"content":619},"How can platform teams improve onboarding access without sacrificing control?","Offer temporary, self-service access, such as a 30-day trial, to remove early friction. If full automation isn’t possible, use asynchronous human-in-the-loop onboarding and avoid approval-heavy workflows that delay initial exploration and testing.",{"header":621,"content":622},"What role does naming and communication play in platform success?","A clear, future-proof name and consistent multi-channel communication strategy help build platform recognition and trust. 
Names should reflect user value, not technology, while communication must include user-focused documentation, health dashboards, and regular updates.",{"header":624,"content":625},"How should platform support be structured during onboarding?","Support should be fast, responsive, and multi-modal. Use tickets for tracking, email for clarity, and chat for real-time help. Aim for quick response times and always share publicly resolved issues to benefit all users.","transform-your-platform-onboarding-for-higher-adoption-rates","content:en-us:the-source:platform:transform-your-platform-onboarding-for-higher-adoption-rates:index.yml","en-us/the-source/platform/transform-your-platform-onboarding-for-higher-adoption-rates/index.yml","en-us/the-source/platform/transform-your-platform-onboarding-for-higher-adoption-rates/index",[631,642,653],{"_path":524,"_dir":29,"_draft":6,"_partial":6,"_locale":7,"slug":525,"type":385,"category":29,"config":632,"seo":633,"content":634,"_id":557,"_type":31,"title":7,"_source":32,"_file":558,"_stem":559,"_extension":35},{"layout":9,"template":387,"featured":330,"articleType":388,"author":527,"gatedAsset":528},{"title":530,"ogTitle":530,"description":531,"ogDescription":531,"ogImage":532},{"title":530,"description":531,"date":534,"timeToRead":535,"heroImage":532,"keyTakeaways":635,"articleBody":540,"faq":636},[537,538,539],[637,638,639,640,641],{"header":543,"content":544},{"header":546,"content":547},{"header":549,"content":550},{"header":552,"content":553},{"header":555,"content":556},{"_path":597,"_dir":29,"_draft":6,"_partial":6,"_locale":7,"config":643,"seo":644,"content":645,"type":385,"slug":626,"category":29,"_id":627,"_type":31,"title":7,"_source":32,"_file":628,"_stem":629,"_extension":35},{"layout":9,"template":387,"articleType":388,"author":527,"featured":330,"gatedAsset":463},{"title":600,"description":601,"ogImage":602},{"title":600,"date":604,"description":601,"timeToRead":571,"heroImage":602,"keyTakeaways":646,"articleBody":609
,"faq":647},[606,607,608],[648,649,650,651,652],{"header":612,"content":613},{"header":615,"content":616},{"header":618,"content":619},{"header":621,"content":622},{"header":624,"content":625},{"_path":654,"_dir":29,"_draft":6,"_partial":6,"_locale":7,"slug":655,"type":385,"category":29,"config":656,"seo":659,"content":663,"_id":670,"_type":31,"title":7,"_source":32,"_file":671,"_stem":672,"_extension":35},"/en-us/the-source/platform/accelerate-embedded-development-in-software-defined-vehicles","accelerate-embedded-development-in-software-defined-vehicles",{"layout":9,"template":387,"featured":6,"articleType":657,"gatedAsset":658},"Guide","pf-accelerate-embedded-development-in-software-defined-vehicles",{"noIndex":6,"title":660,"ogTitle":660,"description":661,"ogDescription":661,"ogImage":662},"Accelerate embedded development in software-defined vehicles","Learn how DevSecOps transforms automotive embedded development. Reduce feedback cycles from weeks to hours while maintaining safety compliance.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1752239485/acehu4zl6nv8dntuafvx.png",{"title":660,"description":661,"date":664,"heroImage":662,"keyTakeaways":665,"articleBody":669},"2025-06-30",[666,667,668],"Modern automotive development faces unprecedented complexity with millions of lines of code across dozens of ECUs. Traditional approaches with weeks-long feedback cycles and manual processes cannot scale.","Leading manufacturers achieve dramatic improvements through DevSecOps: automated workflows reduce feedback from weeks to hours, integrated hardware testing eliminates bottlenecks, and compliance automation.","Real results include a reduction in feedback cycles from 4-6 weeks to 30 minutes, increased Linux build frequency, and simplified build systems.","The automotive industry is undergoing its most significant transformation since the assembly line. 
With the advent of electric vehicles (EVs) and software-defined vehicles (SDVs), software powers everything from advanced driver assistance to infotainment systems. However, the complexity of modern vehicles creates unprecedented development challenges that traditional approaches cannot address.\n\nToday's connected vehicles contain millions of lines of code across dozens of electronic control units. Autonomous vehicles push this complexity even further, requiring real-time processing, cybersecurity integration, and seamless coordination between hardware and software systems. Development teams struggle with feedback cycles measured in weeks, manual security testing processes, and disconnected compliance workflows that create bottlenecks and increase costs.\n\nForward-thinking automotive manufacturers are solving these challenges through comprehensive DevSecOps transformation. By integrating development, security, and operations into unified workflows, they're achieving remarkable results: feedback cycles reduced from weeks to hours, automated compliance with automotive cybersecurity standards, and development velocity that scales with business growth.\n\nThe transformation centers on end-to-end workflow automation that eliminates the inefficiencies of traditional embedded development. Instead of developers working in isolation with inconsistent build environments, leading companies implement automated pipelines that ensure consistency and reliability. \n\nCollaborative code review processes catch security vulnerabilities early when they're less expensive to fix — particularly critical for safety-critical vehicle security applications. And by codifying compliance requirements and enforcing them automatically through customizable frameworks, organizations can ensure compliance is built into the process rather than bolted on afterward.\n\nHardware testing integration represents another breakthrough. 
Unlike enterprise software, automotive embedded code must be tested on target hardware or accurate simulations. Innovative manufacturers are connecting cloud-based processors, virtual hardware simulators, and physical test benches directly to automated workflows. This eliminates manual scheduling bottlenecks and enables continuous testing, dramatically increasing utilization of expensive test hardware.\n\nThe results speak for themselves. With a comprehensive DevSecOps platform, one auto manufacturer is now able to process over 120,000 CI/CD jobs daily, supporting massive repositories while maintaining the rigorous security standards required for automotive industry applications.\n\nAs SDVs and EVs reshape the competitive landscape, software development capability becomes a strategic differentiator. Companies that successfully transform their embedded development practices through comprehensive DevSecOps approaches position themselves to lead in the software-defined future, while those that don't risk falling behind as the industry accelerates into its next chapter.\n\nDownload the complete guide to discover real-world implementations, detailed case studies, and proven strategies for transforming your automotive embedded development practices.","content:en-us:the-source:platform:accelerate-embedded-development-in-software-defined-vehicles:index.yml","en-us/the-source/platform/accelerate-embedded-development-in-software-defined-vehicles/index.yml","en-us/the-source/platform/accelerate-embedded-development-in-software-defined-vehicles/index",[674,690,703],{"_path":675,"_dir":676,"_draft":6,"_partial":6,"_locale":7,"config":677,"title":679,"description":680,"link":681,"_id":687,"_type":31,"_source":32,"_file":688,"_stem":689,"_extension":35},"/shared/en-us/the-source/gated-assets/navigating-ai-maturity-in-devsecops","gated-assets",{"id":442,"formId":678},1002,"Navigating AI maturity in DevSecOps","Read our survey findings from more than 5,000 DevSecOps professionals 
worldwide for insights on how organizations are incorporating AI into the software development lifecycle.",{"text":682,"config":683},"Read the report",{"href":684,"dataGaName":685,"dataGaLocation":686},"https://about.gitlab.com/developer-survey/2024/ai/","Navigating AI Maturity in DevSecOps","thesource","content:shared:en-us:the-source:gated-assets:navigating-ai-maturity-in-devsecops.yml","shared/en-us/the-source/gated-assets/navigating-ai-maturity-in-devsecops.yml","shared/en-us/the-source/gated-assets/navigating-ai-maturity-in-devsecops",{"_path":691,"_dir":676,"_draft":6,"_partial":6,"_locale":7,"config":692,"title":693,"description":694,"link":695,"_id":700,"_type":31,"_source":32,"_file":701,"_stem":702,"_extension":35},"/shared/en-us/the-source/gated-assets/source-lp-ai-guide-for-enterprise-leaders-building-the-right-approach",{"id":443},"AI guide for enterprise leaders: Building the right approach","Download our guide for enterprise leaders to learn how to prepare your C-suite, executive leadership, and development teams for what AI can do today — and will do in the near future — to accelerate software development.",{"text":696,"config":697},"Read the guide",{"href":698,"dataGaName":699,"dataGaLocation":686},"https://about.gitlab.com/the-source/ai/ai-guide-for-enterprise-leaders-building-the-right-approach/","AI Guide For Enterprise Leaders: Building the Right 
Approach","content:shared:en-us:the-source:gated-assets:source-lp-ai-guide-for-enterprise-leaders-building-the-right-approach.yml","shared/en-us/the-source/gated-assets/source-lp-ai-guide-for-enterprise-leaders-building-the-right-approach.yml","shared/en-us/the-source/gated-assets/source-lp-ai-guide-for-enterprise-leaders-building-the-right-approach",{"_path":704,"_dir":676,"_draft":6,"_partial":6,"_locale":7,"config":705,"title":706,"description":707,"link":708,"_id":713,"_type":31,"_source":32,"_file":714,"_stem":715,"_extension":35},"/shared/en-us/the-source/gated-assets/source-lp-how-to-get-started-using-ai-in-software-development",{"id":441,"formId":678},"How to get started using AI in software development","Learn how to strategically implement AI to boost efficiency, security, and reduce context switching. Empower every member of your team with AI capabilities.",{"text":709,"config":710},"Download the guide",{"href":711,"dataGaName":712,"dataGaLocation":686},"https://about.gitlab.com/the-source/ai/getting-started-with-ai-in-software-development-a-guide-for-leaders/","How to Get Started Using AI in Software 
Development","content:shared:en-us:the-source:gated-assets:source-lp-how-to-get-started-using-ai-in-software-development.yml","shared/en-us/the-source/gated-assets/source-lp-how-to-get-started-using-ai-in-software-development.yml","shared/en-us/the-source/gated-assets/source-lp-how-to-get-started-using-ai-in-software-development",[717,728,739],{"_path":383,"_dir":20,"_draft":6,"_partial":6,"_locale":7,"slug":384,"type":385,"category":20,"config":718,"seo":719,"content":720,"_id":420,"_type":31,"title":7,"_source":32,"_file":421,"_stem":422,"_extension":35},{"layout":9,"template":387,"featured":330,"articleType":388,"author":389,"gatedAsset":390},{"title":392,"description":393,"ogImage":394},{"title":396,"description":393,"date":397,"timeToRead":398,"keyTakeaways":721,"articleBody":403,"faq":722,"heroImage":394},[400,401,402],[723,724,725,726,727],{"header":406,"content":407},{"header":409,"content":410},{"header":412,"content":413},{"header":415,"content":416},{"header":418,"content":419},{"_path":561,"_dir":20,"_draft":6,"_partial":6,"_locale":7,"slug":562,"type":385,"category":20,"config":729,"seo":730,"content":731,"_id":593,"_type":31,"title":7,"_source":32,"_file":594,"_stem":595,"_extension":35},{"layout":9,"template":387,"featured":330,"articleType":388,"author":564,"gatedAsset":441},{"title":566,"description":567,"ogImage":568},{"title":566,"description":567,"date":570,"timeToRead":571,"keyTakeaways":732,"articleBody":576,"faq":733,"heroImage":568},[573,574,575],[734,735,736,737,738],{"header":579,"content":580},{"header":582,"content":583},{"header":585,"content":586},{"header":588,"content":589},{"header":591,"content":592},{"_path":740,"_dir":20,"_draft":6,"_partial":6,"_locale":7,"slug":741,"type":385,"category":20,"config":742,"seo":744,"content":748,"_id":755,"_type":31,"title":7,"_source":32,"_file":756,"_stem":757,"_extension":35},"/en-us/the-source/ai/transform-automotive-embedded-development-with-ai","transform-automotive-embedded-development-with-
ai",{"layout":9,"template":387,"featured":6,"articleType":657,"gatedAsset":743},"pf-transform-automotive-embedded-development-with-ai",{"description":745,"ogTitle":746,"title":746,"ogDescription":745,"ogImage":747,"noIndex":6},"Discover 10 AI use cases that accelerate automotive embedded development cycles, from ECU code generation to HIL testing and security optimization.","Transform automotive embedded development with AI","https://res.cloudinary.com/about-gitlab-com/image/upload/v1751463713/gelqfjmbdqaschyh5ban.png",{"title":746,"description":745,"heroImage":747,"keyTakeaways":749,"articleBody":753,"date":754},[750,751,752],"AI-powered code generation and automated testing reduce manual effort while maintaining quality standards for safety-critical automotive systems.","Automated vulnerability detection and resolution help embedded systems meet functional safety and cybersecurity regulations faster.","AI systems provide root cause analysis for failed tests and optimize resource utilization in constrained embedded environments.","The automotive industry stands at a pivotal transformation point. By 2035, every vehicle will be software-defined and AI-powered, and the average car is expected to contain 650 million lines of code by the end of 2025. The shift toward electric vehicles and software-defined vehicles demands a fundamental change in how original equipment manufacturers (OEMs) approach embedded development.\n\nToday's vehicles are essentially computers on wheels, packed with sophisticated embedded systems that control everything from infotainment to critical safety systems. The complexity of modern automotive embedded systems requires new approaches to development, testing, and deployment that can keep pace with fierce market competition.\n\nArtificial intelligence (AI) is emerging as the key accelerator for embedded development teams. 
Recent industry data shows that 66% of development teams in the automotive industry already use AI in their software development lifecycle — the highest adoption rate among all industries surveyed. This technology is revolutionizing how teams approach fundamental challenges in the development of autonomous and connected vehicles.\n\nAI transforms traditional embedded development workflows across multiple dimensions. Code generation capabilities help developers write firmware for electronic control units (ECUs) in C/C++ more efficiently, reducing the time spent on boilerplate code while maintaining consistency across complex automotive systems. These AI systems understand the context of existing codebases and can suggest relevant implementations for standard functions.\n\nTesting and validation represent another critical area where AI delivers substantial value. AI can automatically generate comprehensive unit tests for ECU functions, covering edge cases that human developers might miss. This capability proves essential for safety systems where thorough testing is mandatory for regulatory compliance.\n\nSecurity remains paramount in modern vehicle development. AI-powered vulnerability analysis helps embedded developers understand and resolve security issues detected in their code. These systems can explain potential risks in clear terms and suggest appropriate remediation strategies optimized for resource-constrained microcontrollers.\n\nHardware-in-the-loop (HIL) testing, crucial for validating autonomous driving systems, benefits significantly from AI-driven root cause analysis. When HIL tests fail, AI can parse extensive log files and identify error patterns, dramatically reducing debugging time and accelerating the overall testing cycle.\n\nLegacy code management poses ongoing challenges as teams transition between ECU generations. 
AI-powered code explanation capabilities help developers understand older codebases, reducing the risk of breaking critical pathways during migration. This proves especially valuable when documentation is incomplete or outdated.\n\nResource optimization for severely constrained environments also becomes more manageable with AI assistance. These systems can identify memory leaks, inefficient algorithms, and CPU-intensive operations that need optimization, ensuring firmware meets strict resource requirements while maintaining real-time performance.\n\nFinally, visual interface development for infotainment systems using frameworks like Qt/QML benefits from AI-enhanced code reviews that catch issues early in development. This prevents costly delays and integration problems that often occur late in the development process.\n\nThe future of automotive embedded development belongs to organizations that successfully integrate AI throughout their development lifecycle. As technology complexity grows, AI becomes not just an advantage but a necessity for maintaining competitive position in the rapidly evolving automotive market.\n\nDownload the complete guide to explore all 10 AI use cases and discover how to implement these transformative solutions in your embedded development 
workflow.","2025-07-03","content:en-us:the-source:ai:transform-automotive-embedded-development-with-ai:index.yml","en-us/the-source/ai/transform-automotive-embedded-development-with-ai/index.yml","en-us/the-source/ai/transform-automotive-embedded-development-with-ai/index",[759,778,816],{"_path":760,"_dir":24,"_draft":6,"_partial":6,"_locale":7,"config":761,"seo":763,"content":767,"type":385,"slug":774,"category":24,"_id":775,"_type":31,"title":7,"_source":32,"_file":776,"_stem":777,"_extension":35},"/en-us/the-source/security/the-key-to-innovation-and-compliance-in-financial-services",{"layout":9,"template":387,"articleType":657,"featured":6,"gatedAsset":762},"pf-the-key-to-innovation-and-compliance-in-financial-services",{"title":764,"description":765,"ogImage":766},"The key to innovation and compliance in financial services","Discover how financial services organizations can accelerate innovation while staying on top of complex regulatory requirements.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1751463848/fap7fcimrxis5lyfnizg.png",{"title":764,"date":768,"description":765,"heroImage":766,"keyTakeaways":769,"articleBody":773},"2025-05-20",[770,771,772],"Modern financial services firms face a critical challenge: balancing innovation against complex compliance requirements and regulatory frameworks. 
A comprehensive DevSecOps approach transforms this traditional trade-off into a competitive advantage.","Financial institutions with fragmented toolchains experience significant friction points where each tool boundary introduces potential compliance gaps and security vulnerabilities — ultimately increasing compliance risks and exposure to hefty fines.","Organizations embracing unified software delivery platforms report 50-70% faster time-to-delivery, dramatically reduced operational disruptions, stronger compliance posture, and enhanced protection against cyber threats and financial crimes.","Decision-makers in financial services organizations face mounting pressure from multiple directions. Client expectations for digital innovation continue to rise while financial regulators simultaneously impose increasingly stringent compliance requirements.\n\nMany institutions have unwittingly positioned themselves on a seesaw where improving one side necessarily diminishes the other. When innovation accelerates, compliance struggles to keep pace - and when compliance processes tighten, development velocity slows.\n\nThis perceived incompatibility is not an inherent truth but rather a symptom of fragmented technology architecture.\n\n## The high cost of fragmentation\nMost established financial institutions - from investment banks to insurance companies and credit unions - operate with sprawling software delivery stacks cobbled together from disparate tools. 
This patchwork approach creates significant vulnerabilities across the organization:\n- Each tool boundary represents a potential security vulnerability and compliance gap\n- Disjointed workflows frequently result in compliance breaches\n- Limited visibility across teams hampers ongoing compliance efforts\n- Increased complexity drives higher costs for maintaining regulatory compliance\n- Fragmented systems significantly increase exposure to financial losses and reputational damage\n\nWith the exponential rise in cyberattacks targeting the financial industry - 3,348 cyber incidents were reported worldwide in 2023, up from 1,829 the previous year - maintaining this fragmented approach is increasingly risky.\n\n## The DevSecOps transformation advantage\nForward-thinking organizations are discovering that DevSecOps isn't merely a technical methodology - it's a strategic business transformation that fundamentally changes how financial services organizations adhere to regulatory requirements  and prepare for audits.\n\nBy building security and compliance directly into the development process rather than treating them as reactive, separate functions, modern platforms transform what was once a painful trade-off into a competitive advantage:\n- Automated vulnerability detection in real time as developers write code\n- Continuous monitoring and compliance verification against regulatory standards\n- Comprehensive audit trails satisfying regulatory audit requirements\n- Pre-configured compliance templates tailored to financial services industry needs\n- Granular access control maintaining separation of duties while enabling collaboration\n- Version control and advanced workflow controls ensuring proper approval processes\n- Real-time metrics on development velocity, security posture, and compliance risks\n\n## Proven results from industry leaders\nFinancial institutions implementing unified DevSecOps approaches consistently report transformative business outcomes:\n- 50-70% 
reduction in time-to-delivery of new solutions\n- Dramatic simplification of toolchain complexity\n- Enhanced protection against cyber risks and financial crimes\n- Significant reductions in operational costs\n- Improved ability to attract and retain top technical talent\n- Better compliance posture with fewer security incidents and higher compliance scores\n\n## Seize the opportunity\nThe future of financial services technology is one where institutions no longer need to make painful choices between speed, security, and innovation. By evolving to a unified platform approach, your organization can deliver on all three objectives simultaneously while reducing risk, improving operational efficiency, and building a more agile foundation for future growth.\n\nDownload our comprehensive guide to discover how your organization can implement this transformative approach, with detailed implementation frameworks, critical success factors, and real-world case studies from leading financial services companies who have successfully navigated this journey.","the-key-to-innovation-and-compliance-in-financial-services","content:en-us:the-source:security:the-key-to-innovation-and-compliance-in-financial-services:index.yml","en-us/the-source/security/the-key-to-innovation-and-compliance-in-financial-services/index.yml","en-us/the-source/security/the-key-to-innovation-and-compliance-in-financial-services/index",{"_path":779,"_dir":24,"_draft":6,"_partial":6,"_locale":7,"config":780,"seo":782,"content":786,"type":385,"slug":812,"category":24,"_id":813,"_type":31,"title":7,"_source":32,"_file":814,"_stem":815,"_extension":35},"/en-us/the-source/security/compliance-at-the-speed-of-ai-reimagining-grc",{"layout":9,"template":387,"articleType":388,"author":781,"featured":330,"gatedAsset":463},"ayoub-fandi",{"title":783,"description":784,"ogImage":785},"Compliance at the speed of AI: Reimagining GRC","Is your governance, risk, and compliance strategy keeping pace with AI-accelerated 
development? Learn how to prepare for secure software delivery at scale.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1751463857/sb6to0pyohg2ubpxf3ex.png",{"title":783,"date":787,"description":784,"timeToRead":535,"heroImage":785,"keyTakeaways":788,"articleBody":792,"faq":793},"2025-05-14",[789,790,791],"Traditional GRC approaches fail in modern development environments because they operate on quarterly/annual cycles while DevSecOps teams deploy code multiple times daily, creating a fundamental timing mismatch and compliance that exists only on paper.","Successful GRC modernization requires shifting from a project to a product mindset, building continuous compliance into development pipelines, and automating evidence collection as a byproduct of normal development activities.","Organizations must create unified information flows between security functions, replace manual processes with API-driven automation, and redefine metrics to focus on risk reduction rather than compliance artifacts.","The software release calendar has been replaced by a continuous flow of updates and innovations. Yet many organizations still approach compliance like it's 2010.\n\nThe adoption of DevOps practices fundamentally changed the game, compressing release cycles from months to days or even hours. Organizations that once celebrated quarterly releases now deploy to production dozens or hundreds of times daily. This acceleration has delivered enormous business value - faster time to market, quicker feedback loops, and increased competitive advantage.\n\nNow add AI-powered development tools to the mix. 
Large language models, AI coding assistants, and [AI agents](https://about.gitlab.com/the-source/ai/agentic-ai-unlocking-developer-potential-at-scale/) have become sophisticated enough to generate substantial amounts of functional code with minimal human input.\n\nHowever, this creates a significant challenge for governance, risk, and compliance (GRC) teams, who are often still using approaches designed for a world where releases occur quarterly, rather than hourly. Traditional GRC approaches simply weren't designed for this velocity and scale - it’s like trying to monitor and track every car on every highway in the world with a pen and paper.\n\n## Why traditional GRC falls short\nThe fundamental mismatch between modern development and traditional GRC starts with timing. While DevSecOps teams operate continuously, traditional GRC functions typically operate on quarterly or annual cycles. Annual penetration tests, quarterly compliance control testing, and monthly risk assessments simply can't keep pace with environments that change hourly. By the time a traditional security assessment is complete, the system being evaluated may have undergone dozens of changes.\n\nThe gap between automated infrastructure and manual compliance processes compounds this timing mismatch. Cloud-native applications automatically scale resources up and down in response to demand. Infrastructure-as-code templates can spin up and tear down entire environments with a single command. Meanwhile, compliance verification still relies heavily on manual evidence collection and human review. GRC teams can spend days taking screenshots of configurations that were automatically changed minutes after they documented them.\n\nThe result is security compliance that exists largely on paper but bears little resemblance to operational reality. 
When your integrated DevSecOps platform supports hundreds of deployments daily, yet your GRC team still manually collects screenshots every quarter for audit purposes, you have a fundamental disconnect. Risk registers become outdated almost immediately. Compliance certifications verify controls that may no longer exist in the form originally documented. And security policies address threats to systems that have since been redesigned or replaced entirely.\n\n## Transforming GRC for modern DevSecOps\nI’ve seen this tension unfold in countless organizations. Here are a few steps you can take now to help GRC keep up:\n\n### Think about GRC as a product, not a project\nThe first step in transforming GRC for modern DevSecOps environments requires a fundamental shift in thinking. Traditional GRC operates as a project - a recurring set of activities with a defined beginning and end. Modern GRC needs to function as a product - a continuously evolving set of capabilities that deliver ongoing value.\n\nThis product mindset transforms how we approach compliance and security. Instead of preparing for an annual SOC 2 audit by scrambling to collect evidence in the weeks before the auditor arrives, think about building continuous compliance directly into your development pipeline. Instead of quarterly risk management assessments, aim for real-time visibility. And look for ways to embed governance in daily operations, with version-controlled policies managed like code using Markdown.\n\nWithin [a unified DevSecOps platform](https://about.gitlab.com/platform/), this product-based approach happens naturally. Security scans become part of the merge request process. Compliance requirements transform into pipeline rules that run with every commit. And audit evidence is automatically collected as a byproduct of normal development activities. The result? 
The focus shifts from \"passing the audit\" to \"[building securely by default](https://about.gitlab.com/the-source/security/strengthen-your-cybersecurity-strategy-with-secure-by-design/).\"\n\n### Create unified, automated information flows\nYou’ll also need to rethink both the architecture of your GRC program and the engineering approach behind it. Begin by establishing unified information flows among security, risk, and compliance functions. A vulnerability found in a security scan should automatically update your risk register and compliance status without manual intervention. This unified data model ensures everyone works from a single source of truth, breaking down siloes between security and development teams.\n\nThe next step is to replace manual evidence collection with API-driven automation. Instead of taking screenshots of access control settings, implement API calls that query your identity provider and generate access reports automatically. Rather than manually reviewing infrastructure settings, pull configuration data directly from your cloud providers. Every security setting that requires verification should be accessible programmatically.\n\nPerhaps most importantly, leverage the same pipeline-based approach for security that you use for code validation. [Integrated CI/CD pipelines](https://about.gitlab.com/blog/ultimate-guide-to-ci-cd-fundamentals-to-advanced-implementation/) allow you to define security and compliance requirements as code, running automated validation with every change. This infrastructure-as-code approach ensures that security controls are implemented consistently and verified continuously, eliminating the gap between documented controls and operational reality.\n\n### Connect GRC to business value\nThe practical implementation of these changes doesn't happen overnight, but organizations can follow a clear path to transform their GRC approach.\n\nFirst, bridge the cultural and language gap between GRC and engineering teams. 
Security professionals need to understand how developers work, while engineers need to appreciate security requirements. This mutual understanding creates the foundation for effective collaboration. Create joint working sessions where compliance teams learn basic Git workflows while developers understand compliance requirements in concrete terms.\n\nNext, redefine success metrics to focus on risk reduction rather than compliance artifacts. Instead of tracking the number of policies documented or controls tested, measure actual security outcomes: vulnerability remediation times, security issues found in production versus development, and the number of compliance exceptions. These outcome-based metrics drive real improvements in security posture.\n\nThis transforms GRC from a necessary evil to a business enabler. When [security and compliance are built into development workflows](https://about.gitlab.com/the-source/security/beyond-shift-left-engineering-supply-chain-safety-at-scale/), they stop being roadblocks and become competitive advantages. Organizations with integrated security can ship faster and with greater confidence than those with traditional bolted-on approaches.\n\nThis transformation becomes even more powerful within a unified platform. End-to-end visibility across the entire software development lifecycle creates unmatched transparency into security status. The same controls that verify code quality can enforce security requirements, creating a seamless experience for developers while maintaining strong governance for security teams.\n\n## Security as an enabler, not a bottleneck\nAs AI-accelerated development transforms software development, GRC must evolve from a checkpoint process to an integral part of the development workflow. Organizations can maintain strong governance without sacrificing speed by adopting a product mindset, reimagining GRC architecture, and implementing engineering solutions that match the pace of modern development. 
The future of GRC isn't about slowing down development - it's about building security and compliance into every step of the process, enabling teams to move faster with greater confidence.",[794,797,800,803,806,809],{"header":795,"content":796},"Why do traditional GRC models struggle in modern software environments?","Traditional GRC models operate on quarterly or annual cycles, but DevSecOps teams now deploy code multiple times a day. This timing mismatch means compliance efforts often lag behind actual development changes, making them ineffective in dynamic environments.",{"header":798,"content":799},"What does it mean to treat GRC as a product instead of a project?","Viewing GRC as a product means continuously evolving and embedding compliance into daily workflows, rather than treating it as a periodic event. It’s about creating always-on capabilities like automated evidence collection and policy enforcement through code.",{"header":801,"content":802},"How can automation improve governance and compliance?","Automation reduces the reliance on manual reviews and paperwork by using API calls and pipeline integrations to validate security settings and collect audit data. This makes compliance scalable, real-time, and aligned with the pace of software delivery.",{"header":804,"content":805},"What tools or strategies support continuous compliance?","Unified DevSecOps platforms with integrated CI/CD pipelines support continuous compliance. 
They allow you to define security policies as code, apply them automatically with every change, and log evidence of compliance as part of normal workflows.",{"header":807,"content":808},"How should success be measured in modern GRC programs?","Instead of counting controls or documented policies, success should be measured through real-world outcomes like faster vulnerability remediation, fewer security exceptions, and better security hygiene from development to production.",{"header":810,"content":811},"How can AI development practices coexist with compliance requirements?","By embedding guardrails and governance into the software pipeline, AI-powered development can align with compliance needs. Structured policies, automated validation, and continuous monitoring ensure security isn’t compromised while enabling fast iteration.","compliance-at-the-speed-of-ai-reimagining-grc","content:en-us:the-source:security:compliance-at-the-speed-of-ai-reimagining-grc:index.yml","en-us/the-source/security/compliance-at-the-speed-of-ai-reimagining-grc/index.yml","en-us/the-source/security/compliance-at-the-speed-of-ai-reimagining-grc/index",{"_path":817,"_dir":24,"_draft":6,"_partial":6,"_locale":7,"config":818,"seo":820,"content":824,"type":385,"slug":850,"category":24,"_id":851,"_type":31,"title":7,"_source":32,"_file":852,"_stem":853,"_extension":35},"/en-us/the-source/security/embedding-risk-intelligence-into-your-software-supply-chain",{"layout":9,"template":387,"articleType":388,"author":819,"featured":330,"gatedAsset":463},"lee-faus",{"title":821,"description":822,"ogImage":823},"Embedding risk intelligence into your software supply chain","Transform your security strategy by embedding risk assessment into development workflows instead of treating it as a final 
checkpoint.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1751463994/rexeefvqpj1xs8vq7ugl.jpg",{"title":821,"date":825,"description":822,"timeToRead":398,"heroImage":823,"keyTakeaways":826,"articleBody":830,"faq":831},"2025-04-22",[827,828,829],"Focus on business impact instead of vulnerability counts by targeting security threats that pose actual danger to your business rather than trying to fix every potential issue.","Embed risk checks throughout development by adding quality metrics and automated testing early in your software pipeline to catch issues when they’re easier to fix.","Create audit trails for security decisions through “breadcrumbed” processes that document who approved changes and why, creating accountability and improving future decisions.","It’s a nightmare scenario for any business: Hackers have exposed the personal information of millions of your users. What if this wasn’t due to critical vulnerabilities in your application but simply poorly configured API endpoints that hackers could abuse to farm user data? That’s precisely what happened to a popular tech company in 2023, and it’s more common than you might think.\n\nSecurity resources are finite, and [not all threats pose equal business risk](https://about.gitlab.com/the-source/security/security-its-more-than-culture-addressing-the-root-cause-of-common-security/). Organizations that are laser-focused on technical severity ratings rather than actual business impact could be leaving themselves open to unanticipated risks.\n\nMeanwhile, the urgency for better risk quantification has never been greater. Threat actors can now leverage multiple AI systems to execute sophisticated, multi-pronged attacks targeting exploitable vulnerabilities. 
These AI-accelerated campaigns can quickly identify and exploit business-critical weaknesses that traditional security approaches might overlook or deprioritize - turning yesterday’s “medium-risk” vulnerability into today’s multimillion-dollar breach.\n\nTo counter these evolving threats and navigate this growing complexity, leading organizations are fundamentally reimagining their approach. Instead of treating security as a separate function that happens after development, they’re embedding **risk intelligence** throughout their software supply chain. This approach allows them to focus resources where they matter most, reduce time-to-market for secure products, and demonstrate due diligence to regulators and customers.\n\nThe key is distinguishing between vulnerabilities that might cause harm and those that will cause damage in your specific business context. Companies can achieve stronger security and faster innovation by rethinking how risk is evaluated and managed across development and operations.\n\n## Limiting risk through data-driven change management\n**Risk intelligence helps you focus on threats that matter. It’s the difference between knowing you have 3,000 vulnerabilities and understanding which 50 could harm your business.**\n\nKey elements of risk intelligence include:\n\n**Exploitability assessment (reachability)**: Not all vulnerabilities can be weaponized. Risk intelligence evaluates which security findings have actual attack paths versus those that exist in code but cannot be reached by malicious actors.\n\n**Dependency context**: Risk-based security recognizes that a vulnerable package doesn’t just affect one application - it can impact dozens or hundreds across your organization. Modern approaches map dependencies across projects, enabling teams to understand the cascading impact of vulnerabilities throughout the organization. 
This ecosystem view provides critical context for prioritization decisions.\n\n**Continuous risk monitoring**: Instead of point-in-time assessments, risk intelligence requires ongoing monitoring that adjusts as threat landscapes evolve. A vulnerability that was low risk yesterday may become critical today based on emerging exploit techniques.\n\nSo how can you move from reactive security scanning to proactive risk intelligence? The journey begins where your software does - in the software factory itself.\n\n## The software factory: Quality gates and risk signals\nThe software factory is where code transforms from an idea to a deployable package. This phase encompasses everything from initial code commits to unit testing to packaging, creating the foundation for your entire software supply chain. By adding risk checks early, teams can find and fix issues before they spread. Just as critical is establishing clear attribution for every code change, knowing exactly who made each change (contractor, consultant, or employee), why, and when - creating an audit trail providing crucial risk assessment context.\n\nThe software factory offers three key opportunities to embed risk assessment into your development process:\n\n### Collaboration through quality intelligence\nEstablishing cross-functional quality metrics can help organizations create a shared understanding of risk across teams. Potential metrics include code coverage trends, security vulnerability density, technical debt accumulation, performance regression patterns, API compatibility scores, and documentation completeness.\n\n### Transparency through correlated data\nRisk intelligence requires connecting disparate data points into a comprehensive view. Quality intelligence dashboards with real-time metrics and trend visualization help teams spot emerging risk patterns, while documentation traceability creates auditable trails linking requirements, changes, and security findings. 
Automated data collection enables cross-system correlation between code changes and security findings, with pattern recognition algorithms identifying unusual behaviors that manual review might miss. This democratized intelligence empowers all stakeholders to make risk-informed decisions instead of siloing information within security teams.\n\n### Automation for quality assurance\nManual risk assessment can’t scale to modern development speeds. Continuous testing pipelines with automated security scans and performance tests provide early feedback on potential risks without slowing velocity. Automated quality gates enforce minimum standards throughout development, and risk threshold monitoring flags concerning trends before they become critical. These automated guardrails maintain consistent risk assessment while allowing development teams to maintain productivity and improve safety without sacrificing speed.\n\n## Software logistics: Risk management through team-based scorecards\nAfter code is packaged, it enters the logistics phase - provisioning, deployment, configuration, monitoring, and maintenance. Here, potential bugs meet real-world exposure. This makes assessing risk in actual operating conditions vital. However, traditional approaches to risk assessment at this stage are often inflexible and inefficient.\n\n> [Learn how effective software logistics can enable operations teams to efficiently support developers and accelerate delivery](https://about.gitlab.com/the-source/platform/why-software-logistics-is-key-to-accelerating-innovation/).\n\nEffective risk intelligence means helping teams focus on why they should deploy instead of why they shouldn’t - replacing the binary, inflexible assessment methods of the past with an automated, metrics-driven approach. 
Here are three critical aspects to keep in mind:\n\n### Collaborative assessment model\nModern risk approaches replace binary go/no-go decisions with multi-stakeholder evaluations, sometimes called a Change Advisory Board (CAB), incorporating diverse perspectives. Security teams evaluate vulnerability context and exploitability, operations teams assess deployment impact and rollback capabilities, and business stakeholders weigh customer impact against needs. This team-based approach builds consensus around acceptable risk rather than imposing rigid standards, allowing for nuanced decisions that balance security with business objectives.\n\n### Scorecard transparency\nEffective risk evaluation requires visible criteria that consider multiple dimensions of impact. Comprehensive scorecards include security risk factors that assess severity and real-world exploitability, operational metrics that evaluate system stability implications, compliance requirements for relevant regulations, and business impact on customers and revenue. This transparent approach creates a holistic risk profile that provides the context necessary for informed deployment decisions while ensuring all stakeholders understand the basis for security choices.\n\n### Automated scorecard processing\nManual risk assessment creates bottlenecks that slow deployment cycles. Modern approaches use real-time processing with automated score calculation and threshold monitoring to evaluate changes continuously. Integration with CI/CD pipelines, security tools, and compliance systems ensures risk data flows automatically between systems without manual intervention. 
This automation maintains consistent evaluation standards while eliminating the delays typically associated with security reviews.\n\n## From vulnerability counts to business impact: The future of software security\nThe future of application security isn’t about finding more vulnerabilities - it’s about understanding the risk those vulnerabilities pose. By embedding risk intelligence throughout your software supply chain, you can drive team collaboration to help you create secure software faster.\n\nEstablishing this risk assessment process across both your software factory and logistics phases has an added benefit: You’ll create an auditable trail that documents who made security decisions, what evidence they considered, when changes were approved, and why specific actions were taken. This transparency provides accountability across the entire software supply chain, builds institutional memory of risk management approaches, and creates data to inform future decisions. The resulting traceability transforms security from a point-in-time assessment to an ongoing, verifiable process demonstrating due diligence to auditors, regulators, and customers.",[832,835,838,841,844,847],{"header":833,"content":834},"What is risk intelligence in software development?","Risk intelligence is the practice of evaluating security threats based on their real-world business impact rather than just technical severity. It helps teams focus on exploitable and high-priority vulnerabilities, streamlining security efforts.",{"header":836,"content":837},"How does embedding risk checks early improve software security?","Introducing risk assessments during early development phases allows teams to catch and resolve issues sooner, reducing costs and complexity. This shift from reactive to proactive security enhances both speed and safety.",{"header":839,"content":840},"Why should organizations move beyond vulnerability counts?","Counting vulnerabilities doesn’t reflect the true risk landscape. 
Many may be unreachable or irrelevant. Prioritizing based on exploitability and business context ensures limited security resources are used effectively.",{"header":842,"content":843},"How do audit trails contribute to better risk management?","Audit trails document who made a change, why, and when. These records provide accountability, aid compliance, and offer valuable insight for improving future decision-making and demonstrating due diligence.",{"header":845,"content":846},"What role does automation play in risk intelligence?","Automation enables consistent, scalable risk evaluation across CI/CD pipelines. It helps enforce security standards, reduces manual bottlenecks, and ensures timely responses to emerging risks without slowing development.",{"header":848,"content":849},"What’s the benefit of team-based scorecards for deployment decisions?","Team-based scorecards bring together inputs from security, operations, and business teams. This collaborative model replaces rigid go/no-go decisions with nuanced assessments that balance innovation and acceptable risk.","embedding-risk-intelligence-into-your-software-supply-chain","content:en-us:the-source:security:embedding-risk-intelligence-into-your-software-supply-chain:index.yml","en-us/the-source/security/embedding-risk-intelligence-into-your-software-supply-chain/index.yml","en-us/the-source/security/embedding-risk-intelligence-into-your-software-supply-chain/index",{"categoryNames":855},{"ai":356,"platform":363,"security":98},1753733242798]