// NOTE(review): the hashed chunk filenames in the import specifiers indicate this file is
// bundler (Vite/Rollup) build output. It should normally be regenerated from its JSX source
// rather than hand-edited. This pass reformats the minified output for readability, rejoins
// string literals that had been broken across physical lines (a raw newline inside a
// double-quoted string is a syntax error), and replaces `==` with `===`. Runtime strings,
// component structure, props, and keys are otherwise unchanged.
import { j as e } from "./ui-vendor-C2HcqRsg.js";
import {
  b3 as t, // hook returning a boolean used to switch mobile/desktop layout — presumably useIsMobile; verify in index chunk
  H as s, // document-head component (renders <title>/<meta>/<script> children)
  aD as a, // canonical-path component (takes `path`)
  a as i, // site header component
  ay as n, // chevron-style icon (breadcrumb separator / step arrow)
  B as r, // Button
  az as o, // arrow icon used inside CTA buttons
  T as l, // class-name combiner (cn/clsx-style)
  aI as c, // check-style icon
  b as d, // Card
  b4 as m, b5 as p, b6 as x, b7 as u, b8 as h, b9 as g, // table primitives — presumably Table/TableHeader/TableRow/TableHead/TableBody/TableCell; verify
  y as f, // Badge
  M as y, // mail icon
  aU as b, // phone icon
  F as j, // site footer component
  aN as v, ba as w, aE as N, bb as k, u as I, // icons for the five pipeline layers below
} from "./index-XbLgZOjd.js";
import { L as A } from "./react-vendor-DAEgK_3Y.js"; // router Link
import "./chart-vendor-CanUHUaG.js"; // side-effect import (chunk preload)

/** The five MLOps pipeline layers and their capability bullet lists. */
const M = [
  {
    id: "data-pipeline",
    icon: v,
    name: "Data Pipeline Layer",
    capabilities: [
      "Automated data ingestion from your source systems (databases, APIs, streaming sources, file stores)",
      "Data validation: schema checks, distribution monitoring, completeness verification",
      "Feature engineering pipelines with versioning and reproducibility",
      "Feature store integration for consistent features across training and serving",
    ],
  },
  {
    id: "model-development",
    icon: w,
    name: "Model Development Layer",
    capabilities: [
      "Experiment tracking: every training run logged with parameters, metrics, and artifacts",
      "Automated hyperparameter optimization",
      "Model evaluation against business KPIs, not just technical metrics like accuracy, but the outcomes that matter to your stakeholders",
      "Reproducibility guarantees: any model can be rebuilt from its exact training configuration",
    ],
  },
  {
    id: "deployment",
    icon: N,
    name: "Deployment Layer",
    capabilities: [
      "CI/CD for machine learning: automated testing, validation, and deployment pipelines",
      "KPI-gated deployments: models must clear predefined performance thresholds before reaching production.",
      "Reversible rollouts: every deployment can be rolled back instantly. If real-world performance drops below thresholds, rollback triggers automatically.",
      "A/B testing infrastructure for comparing model versions in production",
      "Multi-environment support: development, staging, production, with promotion gates between each",
    ],
  },
  {
    id: "monitoring",
    icon: k,
    name: "Monitoring Layer",
    capabilities: [
      "Real-time model performance tracking: prediction accuracy, latency, throughput",
      "Data drift detection: automated alerts when input data distributions shift beyond acceptable bounds",
      "Business KPI dashboards: the metrics your stakeholders care about, updated in real-time",
      "Automated retraining triggers: when performance degrades beyond thresholds, the retraining pipeline activates",
    ],
  },
  {
    id: "governance",
    icon: I,
    name: "Governance Layer",
    capabilities: [
      "Model registry with full lineage: who built it, what data trained it, when it was deployed, how it's performing",
      "Audit trails for every model decision: training, validation, deployment, rollback",
      "Bias detection and fairness monitoring",
      "Compliance documentation: automated reporting for regulatory requirements",
      "Access controls: role-based permissions for model development, approval, and deployment",
    ],
  },
];

/** The four KPI-gate process steps rendered in the "KPI Gates" section. */
const P = [
  {
    step: 1,
    title: "Define KPI Thresholds",
    description:
      "During Discovery, we define KPI thresholds with your stakeholders. These aren't just model accuracy targets. They're business outcomes. Revenue impact. Cost reduction. Error rate improvements. Processing time.",
  },
  {
    step: 2,
    title: "Evaluate During Development",
    description:
      "During development, every model version is evaluated against these KPIs using holdout data and business simulation.",
  },
  {
    step: 3,
    title: "Gate Check at Deployment",
    description:
      "At deployment, the KPI gate checks whether the model meets all thresholds. If it passes, it deploys. If it doesn't, it goes back to the development team with a clear gap analysis.",
  },
  {
    step: 4,
    title: "Monitor in Production",
    description:
      "In production, KPIs are monitored continuously. If performance drops below thresholds, the system automatically triggers either a rollback or a retraining cycle, depending on the severity.",
  },
];

/** Bullet points for the "Reversible Rollouts" callout box. */
const L = [
  "Every deployment maintains the previous model version in a warm standby state",
  "Automated health checks run continuously after deployment",
  "If any health check fails (performance degradation, latency spikes, prediction anomalies) the system reverts to the previous version automatically",
  "Full audit trail records every rollout and rollback event",
  "Stakeholders receive automated notifications for any deployment state change",
];

/** Rows for the MLOps tooling stack (rendered as cards on mobile, a table on desktop). */
const C = [
  { category: "Orchestration", tools: "Kubeflow, Airflow, Prefect", purpose: "Pipeline scheduling and management" },
  { category: "Experiment Tracking", tools: "MLflow, Weights & Biases", purpose: "Training run logging and comparison" },
  { category: "Model Registry", tools: "MLflow, custom solutions", purpose: "Model versioning and lineage" },
  { category: "Feature Store", tools: "Feast, Tecton", purpose: "Consistent feature management" },
  { category: "Serving", tools: "Seldon, BentoML, custom APIs", purpose: "Model inference at scale" },
  { category: "Monitoring", tools: "Evidently, Grafana, custom dashboards", purpose: "Performance and drift detection" },
  { category: "CI/CD", tools: "GitHub Actions, Jenkins, GitLab CI", purpose: "Automated testing and deployment" },
  { category: "Infrastructure", tools: "Kubernetes, Terraform", purpose: "Scalable, reproducible environments" },
];

/** 90-day blueprint milestones. */
const S = [
  { timeline: "Week 3–4", title: "Architecture", description: "Pipeline design, tooling selection, infrastructure planning" },
  { timeline: "Week 9–10", title: "Platform Setup", description: "Full pipeline implementation, testing, documentation" },
  { timeline: "Week 11–12", title: "First Deployment", description: "First model flows through the complete pipeline to production" },
  { timeline: "Week 13", title: "Handoff", description: "Your team takes ownership with full runbooks and operational documentation" },
];

/**
 * MLOps Pipeline Setup marketing page (default export).
 * Renders head metadata + JSON-LD, breadcrumb/hero, the five lifecycle layers,
 * KPI-gate steps, rollback callout, tooling stack, 90-day timeline, and a CTA.
 */
const D = () => {
  // Minified original bound this to `v`, shadowing the icon import; renamed for clarity.
  const isMobile = t();
  return e.jsxs(e.Fragment, {
    children: [
      // Document head: title, meta tags, and Article structured data.
      e.jsxs(s, {
        children: [
          e.jsx("title", { children: "MLOps Pipeline Setup | GCC-as-a-Service | Allerin" }),
          e.jsx("meta", {
            name: "description",
            content:
              "Production-grade MLOps pipeline setup for AI capability centers. Data pipelines, model development, deployment automation, monitoring, and governance, all operational in weeks.",
          }),
          e.jsx("meta", { property: "og:title", content: "MLOps Pipeline Setup | GCC-as-a-Service | Allerin" }),
          e.jsx("meta", {
            property: "og:description",
            content:
              "End-to-end MLOps pipeline architecture: from data ingestion to model governance. Built for production AI at enterprise scale.",
          }),
          e.jsx("meta", { property: "og:type", content: "article" }),
          e.jsx("script", {
            type: "application/ld+json",
            children: JSON.stringify({
              "@context": "https://schema.org",
              "@type": "Article",
              headline: "MLOps Pipeline Setup: GCC-as-a-Service",
              description:
                "Production-grade MLOps pipeline setup covering data pipelines, model development, deployment, monitoring, and governance.",
              publisher: { "@type": "Organization", name: "Allerin", url: "https://www.allerin.com" },
            }),
          }),
        ],
      }),
      e.jsx(a, { path: "/gcc-as-a-service/mlops-pipeline-setup" }),
      e.jsx(i, {}),
      e.jsxs("main", {
        id: "main",
        children: [
          // Hero section with breadcrumb and primary CTA.
          e.jsx("section", {
            className: "bg-surface/50 py-section-y",
            children: e.jsxs("div", {
              className: "container mx-auto px-6 max-w-4xl",
              children: [
                e.jsxs("nav", {
                  "aria-label": "Breadcrumb",
                  className: "flex items-center gap-1 text-sm text-muted-foreground mb-lg",
                  children: [
                    e.jsx(A, {
                      to: "/gcc-as-a-service",
                      className: "hover:text-data-teal transition-colors",
                      children: "GCC-as-a-Service",
                    }),
                    e.jsx(n, { className: "h-3.5 w-3.5" }),
                    e.jsx("span", { className: "text-foreground font-medium", children: "MLOps Pipeline Setup" }),
                  ],
                }),
                e.jsx("h1", {
                  className: "text-3xl md:text-4xl lg:text-5xl font-bold tracking-tight mb-md",
                  children: "Production MLOps for Real Workloads",
                }),
                e.jsx("p", {
                  className: "text-data-teal font-semibold text-lg mb-sm",
                  children: "The Infrastructure That Turns AI Experiments Into Business Operations",
                }),
                e.jsx("p", {
                  className: "text-muted-foreground text-lg max-w-2xl mb-lg",
                  children:
                    "There's a wide gap between a data scientist's notebook and a model that runs reliably in production, day after day, at enterprise scale. That gap is MLOps. Most AI initiatives stall here. The model works in development. It passes validation. And then it sits in a staging environment for months because nobody built the infrastructure to deploy it safely, monitor it continuously, and retrain it when the data shifts. Allerin builds the MLOps infrastructure that closes this gap as a core component of every AI capability center we deliver.",
                }),
                e.jsx(r, {
                  asChild: true,
                  size: "lg",
                  children: e.jsxs(A, {
                    to: "/contact",
                    children: ["Schedule an AI Readiness Assessment ", e.jsx(o, { className: "ml-1 h-4 w-4" })],
                  }),
                }),
              ],
            }),
          }),
          // Lifecycle section: one striped row per pipeline layer in M.
          e.jsx("section", {
            className: "py-section-y",
            children: e.jsxs("div", {
              className: "container mx-auto px-6 max-w-5xl",
              children: [
                e.jsx("h2", {
                  className: "text-2xl md:text-3xl font-bold tracking-tight mb-sm",
                  children: "What We Build: The Full Model Lifecycle",
                }),
                e.jsx("p", {
                  className: "text-muted-foreground text-lg max-w-3xl mb-xl",
                  children:
                    "Every production AI system requires a pipeline that covers the complete model lifecycle: training, deployment, and everything before, between, and after.",
                }),
                e.jsx("div", {
                  className: "space-y-0",
                  children: M.map((layer, index) => {
                    const Icon = layer.icon;
                    return e.jsx(
                      "div",
                      {
                        // Alternate row background for readability (fixed `==` -> `===`).
                        className: l(
                          "border-l-4 border-data-teal p-lg md:p-xl",
                          index % 2 === 0 ? "bg-background" : "bg-surface/50"
                        ),
                        children: e.jsxs("div", {
                          className: "flex flex-col md:flex-row md:items-start gap-md",
                          children: [
                            e.jsxs("div", {
                              className: "md:w-[30%] flex items-center gap-sm",
                              children: [
                                e.jsx("div", {
                                  className:
                                    "flex h-10 w-10 shrink-0 items-center justify-center rounded-lg bg-data-teal/10 text-data-teal",
                                  children: e.jsx(Icon, { className: "h-5 w-5" }),
                                }),
                                e.jsx("h3", { className: "text-lg font-bold", children: layer.name }),
                              ],
                            }),
                            e.jsx("ul", {
                              className:
                                "md:w-[70%] grid grid-cols-1 sm:grid-cols-2 gap-x-lg gap-y-xs text-sm text-muted-foreground",
                              children: layer.capabilities.map((cap) =>
                                e.jsxs(
                                  "li",
                                  {
                                    className: "flex items-start gap-2",
                                    children: [
                                      e.jsx("span", { className: "text-data-teal mt-0.5", children: "•" }),
                                      e.jsx("span", { children: cap }),
                                    ],
                                  },
                                  cap
                                )
                              ),
                            }),
                          ],
                        }),
                      },
                      layer.id
                    );
                  }),
                }),
              ],
            }),
          }),
          // KPI-gates section: vertical list on mobile, horizontal stepper on desktop.
          e.jsx("section", {
            className: "bg-surface/50 py-section-y",
            children: e.jsxs("div", {
              className: "container mx-auto px-6 max-w-5xl",
              children: [
                e.jsx("h2", {
                  className: "text-2xl md:text-3xl font-bold tracking-tight mb-sm",
                  children: "KPI Gates: Our Signature Approach",
                }),
                e.jsx("p", {
                  className: "text-muted-foreground text-lg max-w-3xl mb-xl",
                  children:
                    "AI deployments often fail because the model is deployed without a clear definition of success. KPI gates solve this. Before any model reaches production, it must pass through a defined set of business performance criteria, agreed upon during the Discovery phase and validated against real data.",
                }),
                e.jsx("div", {
                  className: l("flex", isMobile ? "flex-col gap-lg" : "items-start gap-0"),
                  children: P.map((item, index) =>
                    e.jsxs(
                      "div",
                      {
                        className: l("flex", isMobile ? "items-start gap-md" : "flex-col items-center text-center flex-1"),
                        children: [
                          e.jsx("div", {
                            className:
                              "flex h-10 w-10 shrink-0 items-center justify-center rounded-full bg-data-teal text-white font-bold text-sm",
                            children: item.step,
                          }),
                          e.jsxs("div", {
                            className: l(isMobile ? "" : "mt-md px-2"),
                            children: [
                              e.jsx("p", { className: "font-semibold text-sm mb-xs", children: item.title }),
                              e.jsx("p", { className: "text-muted-foreground text-xs", children: item.description }),
                            ],
                          }),
                          // FIXME(review): the className includes `absolute` while the inline
                          // style forces `position: "relative"`; one of the two is contradictory
                          // and should be removed in the JSX source. Kept as-is to preserve
                          // the rendered behavior of this build.
                          !isMobile &&
                            index < P.length - 1 &&
                            e.jsx(n, {
                              className: "h-5 w-5 text-data-teal/40 mt-3 -mr-2 hidden md:block absolute",
                              style: { position: "relative" },
                            }),
                        ],
                      },
                      item.step
                    )
                  ),
                }),
                e.jsx("p", {
                  className: "text-muted-foreground text-sm mt-xl max-w-3xl",
                  children:
                    "This approach eliminates the most expensive failure mode in enterprise AI: deploying a model that technically works but doesn't deliver business value.",
                }),
              ],
            }),
          }),
          // Reversible-rollouts section: checklist callout built from L.
          e.jsx("section", {
            className: "py-section-y",
            children: e.jsxs("div", {
              className: "container mx-auto px-6 max-w-4xl",
              children: [
                e.jsx("h2", {
                  className: "text-2xl md:text-3xl font-bold tracking-tight mb-sm",
                  children: "Reversible Rollouts: Why It Matters",
                }),
                e.jsx("p", {
                  className: "text-muted-foreground text-lg max-w-3xl mb-lg",
                  children:
                    "Production AI carries real risk. A model that makes incorrect predictions can affect customer experience, operational efficiency, or regulatory compliance. Our deployment pipeline ensures every model deployment is reversible: in practice, instantly, with zero downtime.",
                }),
                e.jsx("div", {
                  className: "bg-data-teal/5 border border-data-teal/20 rounded-lg p-lg space-y-sm",
                  children: L.map((point) =>
                    e.jsxs(
                      "div",
                      {
                        className: "flex items-start gap-sm",
                        children: [
                          e.jsx(c, { className: "h-5 w-5 text-data-teal shrink-0 mt-0.5" }),
                          e.jsx("p", { className: "text-sm text-muted-foreground", children: point }),
                        ],
                      },
                      point
                    )
                  ),
                }),
              ],
            }),
          }),
          // Tooling-stack section: cards on mobile, table on desktop.
          e.jsx("section", {
            className: "bg-surface/50 py-section-y",
            children: e.jsxs("div", {
              className: "container mx-auto px-6 max-w-5xl",
              children: [
                e.jsx("h2", {
                  className: "text-2xl md:text-3xl font-bold tracking-tight mb-sm",
                  children: "The Allerin MLOps Stack",
                }),
                e.jsx("p", {
                  className: "text-muted-foreground text-lg max-w-3xl mb-xl",
                  children:
                    "We're tool-agnostic but opinionated. We select the right tools for your environment, but we've refined preferences based on what actually works in production at enterprise scale.",
                }),
                isMobile
                  ? e.jsx("div", {
                      className: "space-y-md",
                      children: C.map((row) =>
                        e.jsxs(
                          d,
                          {
                            className: "p-md space-y-xs",
                            children: [
                              e.jsx("p", { className: "font-semibold text-sm", children: row.category }),
                              e.jsx("p", { className: "font-mono text-xs text-data-teal", children: row.tools }),
                              e.jsx("p", { className: "text-xs text-muted-foreground", children: row.purpose }),
                            ],
                          },
                          row.category
                        )
                      ),
                    })
                  : e.jsxs(m, {
                      children: [
                        e.jsx(p, {
                          children: e.jsxs(x, {
                            children: [
                              e.jsx(u, { children: "Category" }),
                              e.jsx(u, { children: "Common Tools" }),
                              e.jsx(u, { children: "Purpose" }),
                            ],
                          }),
                        }),
                        e.jsx(h, {
                          children: C.map((row) =>
                            e.jsxs(
                              x,
                              {
                                children: [
                                  e.jsx(g, { className: "font-medium", children: row.category }),
                                  e.jsx(g, { className: "font-mono text-sm text-data-teal", children: row.tools }),
                                  e.jsx(g, { className: "text-muted-foreground", children: row.purpose }),
                                ],
                              },
                              row.category
                            )
                          ),
                        }),
                      ],
                    }),
                e.jsx("p", {
                  className: "text-muted-foreground text-sm mt-lg max-w-3xl",
                  children:
                    "We adapt this stack to your existing infrastructure. If you're on AWS, Azure, or GCP, we integrate with your cloud-native ML services. If you have an existing data platform, we build on top of it, not beside it.",
                }),
              ],
            }),
          }),
          // 90-day blueprint section: timeline badges from S plus a cross-link CTA.
          e.jsx("section", {
            className: "py-section-y",
            children: e.jsxs("div", {
              className: "container mx-auto px-6 max-w-4xl",
              children: [
                e.jsx("h2", {
                  className: "text-2xl md:text-3xl font-bold tracking-tight mb-sm",
                  children: "How This Fits Into the 90-Day Blueprint",
                }),
                e.jsx("p", {
                  className: "text-muted-foreground text-lg max-w-3xl mb-lg",
                  children:
                    "The MLOps pipeline isn't a separate workstream. It's woven into the capability center buildout:",
                }),
                e.jsx("div", {
                  className: "space-y-md mb-lg",
                  children: S.map((phase) =>
                    e.jsxs(
                      "div",
                      {
                        className: "flex items-start gap-md",
                        children: [
                          e.jsx(f, {
                            variant: "outline",
                            className:
                              "shrink-0 border-data-teal/30 bg-data-teal/10 text-data-teal text-xs font-mono",
                            children: phase.timeline,
                          }),
                          e.jsxs("div", {
                            children: [
                              e.jsx("p", { className: "font-semibold text-sm", children: phase.title }),
                              e.jsx("p", { className: "text-xs text-muted-foreground", children: phase.description }),
                            ],
                          }),
                        ],
                      },
                      phase.timeline
                    )
                  ),
                }),
                e.jsx("p", {
                  className: "text-muted-foreground text-sm mb-lg max-w-3xl",
                  children:
                    "By day 90, the pipeline isn't a prototype. It's a production system that has already deployed a real model.",
                }),
                e.jsx(r, {
                  variant: "outline",
                  asChild: true,
                  children: e.jsxs(A, {
                    to: "/gcc-as-a-service/ai-capability-center",
                    children: ["See the full 90-Day Blueprint ", e.jsx(o, { className: "ml-1 h-4 w-4" })],
                  }),
                }),
              ],
            }),
          }),
          // Closing CTA section with contact details.
          e.jsx("section", {
            className: "bg-gradient-to-br from-foreground to-foreground/90 text-background py-section-y",
            children: e.jsxs("div", {
              className: "container mx-auto px-6 max-w-3xl text-center",
              children: [
                e.jsx("h2", {
                  className: "text-2xl md:text-3xl font-bold mb-md",
                  children: "Ready to Build Your MLOps Foundation?",
                }),
                e.jsx("p", {
                  className: "text-background/70 text-lg mb-lg max-w-xl mx-auto",
                  children:
                    "If your AI initiatives are stuck between proof-of-concept and production, the bottleneck is usually infrastructure, not models. Let's talk about what production-grade MLOps looks like for your organization.",
                }),
                e.jsx(r, {
                  asChild: true,
                  size: "lg",
                  variant: "secondary",
                  children: e.jsxs(A, {
                    to: "/contact",
                    children: ["Schedule an AI Readiness Assessment ", e.jsx(o, { className: "ml-1 h-4 w-4" })],
                  }),
                }),
                e.jsxs("div", {
                  className:
                    "flex flex-col sm:flex-row items-center justify-center gap-md mt-lg text-sm text-background/60",
                  children: [
                    e.jsxs("span", {
                      className: "flex items-center gap-xs",
                      children: [e.jsx(y, { className: "h-4 w-4" }), " info@allerin.com"],
                    }),
                    e.jsxs("span", {
                      className: "flex items-center gap-xs",
                      children: [e.jsx(b, { className: "h-4 w-4" }), " +1 (512) 200-2416"],
                    }),
                  ],
                }),
              ],
            }),
          }),
        ],
      }),
      e.jsx(j, {}),
    ],
  });
};
export { D as default };