<!DOCTYPE html>
    <html lang="vi" xmlns="http://www.w3.org/1999/xhtml" prefix="og: http://ogp.me/ns#">
    <head>
<title>Khung quản lý rủi ro AI</title>
<meta name="description" content="Khung quản lý rủi ro AI - Savefile - Blogs - https&#x3A;&#x002F;&#x002F;letrungnghia.mangvn.org&#x002F;savefile&#x002F;Education&#x002F;khung-quan-ly-rui-ro-ai-7467.html">
<meta name="author" content="Blog FOSS by Lê Trung Nghĩa">
<meta name="copyright" content="Blog FOSS by Lê Trung Nghĩa [webmaster@vinades.vn]">
<meta name="generator" content="NukeViet v4.5">
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<meta property="og:title" content="Khung quản lý rủi ro AI">
<meta property="og:type" content="website">
<meta property="og:description" content="Savefile - Blogs - https&#x3A;&#x002F;&#x002F;letrungnghia.mangvn.org&#x002F;savefile&#x002F;Education&#x002F;khung-quan-ly-rui-ro-ai-7467.html">
<meta property="og:site_name" content="Blog FOSS by Lê Trung Nghĩa">
<meta property="og:url" content="https://letrungnghia.mangvn.org/savefile/Education/khung-quan-ly-rui-ro-ai-7467.html">
<link rel="shortcut icon" href="https://letrungnghia.mangvn.org/favicon.ico">
<link rel="canonical" href="https://letrungnghia.mangvn.org/savefile/Education/khung-quan-ly-rui-ro-ai-7467.html">
<link rel="alternate" href="https://letrungnghia.mangvn.org/rss/" title="Blogs" type="application/rss+xml">
<link rel="alternate" href="https://letrungnghia.mangvn.org/rss/Government/" title="Blogs - Government" type="application/rss+xml">
<link rel="alternate" href="https://letrungnghia.mangvn.org/rss/Policy/" title="Blogs - Policy" type="application/rss+xml">
<link rel="alternate" href="https://letrungnghia.mangvn.org/rss/Philosophy/" title="Blogs - Philosophy" type="application/rss+xml">
<link rel="alternate" href="https://letrungnghia.mangvn.org/rss/Security/" title="Blogs - Security" type="application/rss+xml">
<link rel="alternate" href="https://letrungnghia.mangvn.org/rss/Legal/" title="Blogs - Legal" type="application/rss+xml">
<link rel="alternate" href="https://letrungnghia.mangvn.org/rss/Education/" title="Blogs - Education" type="application/rss+xml">
<link rel="alternate" href="https://letrungnghia.mangvn.org/rss/Business/" title="Blogs - Business" type="application/rss+xml">
<link rel="alternate" href="https://letrungnghia.mangvn.org/rss/Community/" title="Blogs - Community" type="application/rss+xml">
<link rel="alternate" href="https://letrungnghia.mangvn.org/rss/Interoperability/" title="Blogs - Interoperability" type="application/rss+xml">
<link rel="alternate" href="https://letrungnghia.mangvn.org/rss/Standards/" title="Blogs - Standards" type="application/rss+xml">
<link rel="alternate" href="https://letrungnghia.mangvn.org/rss/Author/" title="Blogs - Author" type="application/rss+xml">
<link rel="alternate" href="https://letrungnghia.mangvn.org/rss/Opinions/" title="Blogs - Opinions" type="application/rss+xml">
<link rel="alternate" href="https://letrungnghia.mangvn.org/rss/CC/" title="Blogs - CC" type="application/rss+xml">
<link rel="alternate" href="https://letrungnghia.mangvn.org/rss/Cloud/" title="Blogs - Cloud" type="application/rss+xml">
<link rel="alternate" href="https://letrungnghia.mangvn.org/rss/Hardware/" title="Blogs - Hardware" type="application/rss+xml">
<link rel="alternate" href="https://letrungnghia.mangvn.org/rss/Software/" title="Blogs - Software" type="application/rss+xml">
<link rel="alternate" href="https://letrungnghia.mangvn.org/rss/Statistics/" title="Blogs - Statistics" type="application/rss+xml">
<link rel="alternate" href="https://letrungnghia.mangvn.org/rss/Trends/" title="Blogs - Trends" type="application/rss+xml">
<link rel="alternate" href="https://letrungnghia.mangvn.org/rss/China/" title="Blogs - China" type="application/rss+xml">
<link rel="alternate" href="https://letrungnghia.mangvn.org/rss/OSs/" title="Blogs - OSs" type="application/rss+xml">
<link rel="alternate" href="https://letrungnghia.mangvn.org/rss/Blogs/" title="Blogs - Blogs" type="application/rss+xml">
<link rel="alternate" href="https://letrungnghia.mangvn.org/rss/Outsourcing/" title="Blogs - Outsourcing" type="application/rss+xml">
<link rel="alternate" href="https://letrungnghia.mangvn.org/rss/elearning/" title="Blogs - Elearning" type="application/rss+xml">
<link rel="preload" as="style" href="https://letrungnghia.mangvn.org/assets/css/font-awesome.min.css" type="text/css">
<link rel="preload" as="style" href="https://letrungnghia.mangvn.org/themes/default/css/bootstrap.non-responsive.css" type="text/css">
<link rel="preload" as="style" href="https://letrungnghia.mangvn.org/themes/default/css/style.css" type="text/css">
<link rel="preload" as="style" href="https://letrungnghia.mangvn.org/themes/default/css/style.non-responsive.css" type="text/css">
<link rel="preload" as="style" href="https://letrungnghia.mangvn.org/themes/default/css/news.css" type="text/css">
<link rel="preload" as="style" href="https://letrungnghia.mangvn.org/themes/default/css/custom.css" type="text/css">
<link rel="preload" as="style" href="https://letrungnghia.mangvn.org/assets/css/default.vi.1106.css" type="text/css">
<link rel="preload" as="script" href="https://letrungnghia.mangvn.org/assets/js/jquery/jquery.min.js" type="text/javascript">
<link rel="preload" as="script" href="https://letrungnghia.mangvn.org/assets/js/language/vi.js" type="text/javascript">
<link rel="preload" as="script" href="https://letrungnghia.mangvn.org/assets/js/DOMPurify/purify3.js" type="text/javascript">
<link rel="preload" as="script" href="https://letrungnghia.mangvn.org/assets/js/global.js" type="text/javascript">
<link rel="preload" as="script" href="https://letrungnghia.mangvn.org/assets/js/site.js" type="text/javascript">
<link rel="preload" as="script" href="https://letrungnghia.mangvn.org/themes/default/js/news.js" type="text/javascript">
<link rel="preload" as="script" href="https://letrungnghia.mangvn.org/themes/default/js/main.js" type="text/javascript">
<link rel="preload" as="script" href="https://letrungnghia.mangvn.org/themes/default/js/custom.js" type="text/javascript">
<link rel="preload" as="script" href="https://letrungnghia.mangvn.org/themes/default/js/bootstrap.min.js" type="text/javascript">
<link rel="stylesheet" href="https://letrungnghia.mangvn.org/assets/css/font-awesome.min.css">
<link rel="stylesheet" href="https://letrungnghia.mangvn.org/themes/default/css/bootstrap.non-responsive.css">
<link rel="stylesheet" href="https://letrungnghia.mangvn.org/themes/default/css/style.css">
<link rel="stylesheet" href="https://letrungnghia.mangvn.org/themes/default/css/style.non-responsive.css">
<link rel="stylesheet" href="https://letrungnghia.mangvn.org/themes/default/css/news.css">
<link rel="stylesheet" href="https://letrungnghia.mangvn.org/themes/default/css/custom.css">
<link rel="stylesheet" href="https://letrungnghia.mangvn.org/assets/css/default.vi.1106.css">
<style type="text/css">
	body{background: #fff;}
</style>
<script>
// Google Analytics loader snippet (analytics.js / Universal Analytics).
// Defines window.ga as a command queue, then asynchronously injects the
// analytics.js script before the first <script> tag on the page.
// NOTE(review): Universal Analytics (UA-* properties) stopped processing
// data in July 2023 — consider migrating this tag to Google Analytics 4.
(function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){
(i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),
m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)
})(window,document,'script','https://www.google-analytics.com/analytics.js','ga');
ga('create', 'UA-70254229-3', '');   // UA property ID; no cookie domain override
ga('send', 'pageview');              // record the initial pageview
</script>
</head>
    <body>
<div id="print">
	<div id="hd_print">
		<h2 class="pull-left">Blog FOSS by Lê Trung Nghĩa</h2>
		<p class="pull-right"><a title="Blog FOSS by Lê Trung Nghĩa" href="https://letrungnghia.mangvn.org/">https://letrungnghia.mangvn.org</a></p>
	</div>
	<div class="clear"></div>
	<hr />
	<div id="content">
		<h1>Khung quản lý rủi ro AI</h1>
		<ul class="list-inline">
			<li>Thứ hai - 12/08/2024 05:59</li>
			<li class="hidden-print txtrequired"><em class="fa fa-print">&nbsp;</em><a title="In ra" href="javascript:;" onclick="window.print()">In ra</a></li>
			<li class="hidden-print txtrequired"><em class="fa fa-power-off">&nbsp;</em><a title="Đóng cửa sổ này" href="javascript:;" onclick="window.close()">Đóng cửa sổ này</a></li>
		</ul>
		<div class="clear"></div>
		<div id="hometext">
		</div>
				<div class="imghome">
			<img alt="Khung quản lý rủi ro AI" src="https://letrungnghia.mangvn.org/uploads/letrungnghia/news/2024/ai-rms_timeline.png" width="460" class="img-thumbnail" />
		</div>
		<div class="clear"></div>
		<div id="bodytext" class="clearfix">
			<p align="justify"><b><span style="background: rgb(255, 255, 0);">AI Risk Management Framework</span></b></p>

<p align="justify">Theo: <a href="https://www.nist.gov/itl/ai-risk-management-framework">https://www.nist.gov/itl/ai-risk-management-framework</a></p>

<p align="justify">Vào ngày 29/04/2024, Viện Tiêu chuẩn và Công nghệ quốc gia - NIST (National Institute of Standards and Technology) đã phát hành một ấn phẩm phác thảo dựa trên Khung Quản lý Rủi ro AI - AI RMF (AI Risk Management Framework) để giúp quản lý <span style="font-weight: normal">rủi ro của AI tạo sinh </span><span style="font-weight: normal">(Generative AI).</span><span style="font-weight: normal"> Bản thảo <a href="https://airc.nist.gov/docs/NIST.AI.600-1.GenAI-Profile.ipd.pdf">AI RMF Generative AI Profile</a> có thể giúp các tổ chức nhận diện các </span><span style="font-weight: normal">rủi ro riêng biệt do AI tạo sinh đặt ra và đề xuất các hành động để quản lý rủi ro AI tạo sinh phù hợp nhất với các mục tiêu và ưu tiên của họ. Được </span><span style="font-weight: normal">phát triển suốt năm qua và lấy đầu vào từ <a href="https://www.nist.gov/news-events/news/2023/06/biden-harris-administration-announces-new-nist-public-working-group-ai">nhóm công tác công</a> về AI tạo sinh của NIST với hơn 2.500 thành viên, </span><span style="font-weight: normal">các trung tâm hướng dẫn về một danh sách liệt kê 12 </span><span style="font-weight: normal">rủi ro và hơn 400 hành động các nhà </span><span style="font-weight: normal">phát triển có thể sử dụng để quản lý chúng</span><span style="font-weight: normal">. <a href="https://www.nist.gov/news-events/news/2024/04/department-commerce-announces-new-actions-implement-president-bidens">Nhiều thông tin hơn có ở đây</a>. </span></p>

<p align="justify">Vào ngày 30/04/2024, NIST đã đăng tải thông tin đối chiếu giữa Khung quản lý rủi ro AI của NIST (AI RMF) và Hướng dẫn AI dành cho doanh nghiệp của Nhật Bản (AI GfB).</p>

<p align="justify">Cộng tác với các khu vực công và tư, NIST đã phát triển một khung để quản lý tốt hơn các <span style="font-weight: normal">rủi ro cho các cá nhân, tổ chức, và xã hội liên quan đến </span><span style="font-weight: normal">trí tuệ nhân tạo (AI).</span><span style="font-weight: normal"> <a href="https://doi.org/10.6028/NIST.AI.100-1">Khung Quản lý Rủi ro AI của NIST (AI RMF)</a></span><span style="font-weight: normal"> dự kiến để tự nguyện sử dụng và nâng cao khả năng kết hợp các cân nhắc đáng tin cậy vào trong thiết kế, </span><span style="font-weight: normal">phát triển, sử dụng và </span><span style="font-weight: normal">đánh giá các sản phẩm, dịch vụ và hệ thống AI. </span></p>

<p align="justify"><span style="font-weight: normal">Được phát hành vào ngày 26/01/2023, Khung này đã được </span><span style="font-weight: normal">phát triển qua một quy trình hướng đồng thuận, mở, minh bạch và cộng tác, bao gồm một Yêu cầu về Thông tin, vài phiên bản phác thảo dành cho các bình luận công khai, nhiều <a href="https://www.nist.gov/itl/ai-risk-management-framework/ai-risk-management-framework-workshops-events">hội thảo</a>, và các cơ hội khác để cung cấp đầu vào. Nó được dự kiến </span><span style="font-weight: normal">xây dựng dựa trên, phù hợp với, và hỗ trợ cho những người khác trong các nỗ lực quản lý </span><span style="font-weight: normal">rủi ro AI. </span></p>

<p align="justify"><span style="font-weight: normal">Cùng với <a href="https://airc.nist.gov/AI_RMF_Knowledge_Base/Playbook">Sổ tay AI RMF của NIST</a> cũng đã được NIST xuất bản cùng với một <a href="https://www.nist.gov/itl/ai-risk-management-framework/roadmap-nist-artificial-intelligence-risk-management-framework-ai">Lộ trình AI RMF</a>, <a href="https://www.nist.gov/itl/ai-risk-management-framework/crosswalks-nist-artificial-intelligence-risk-management-framework">AI RMF Crosswalk</a>, và các </span><a href="https://www.nist.gov/itl/ai-risk-management-framework/perspectives-about-nist-artificial-intelligence-risk-management"><u><span style="font-weight: normal">Quan điểm</span></u></a><span style="font-weight: normal"> khác nhau. Ngoài ra, NIST đang tạo ra một </span><span style="font-weight: normal"><a href="https://www.nist.gov/video/introduction-nist-ai-risk-management-framework-ai-rmf-10-explainer-video">video giải thích</a> về AI RMF.</span></p>

<p align="justify">Vào ngày 30/03/2023, NIST đã công bố thành lập Trung tâm Tài nguyên AI Tin cậy và có Trách nhiệm (<a href="https://airc.nist.gov/Home">Trustworthy and Responsible AI Resource Center</a>), nó sẽ tạo thuận lợi cho việc triển khai, và điều chỉnh phù hợp ở mức quốc tế với AI RMF.</p>

<p align="justify">Để xem các bình luận nhận được về các bản thảo trước đó của AI RMF và các Yêu cầu về Thông tin, vui lòng xem trang <a href="https://www.nist.gov/itl/ai-risk-management-framework/ai-rmf-development">Phát triển AI RMF</a>.</p>

<p align="justify"><strong><b>Các tài liệu trước đó </b></strong></p>

<ul>
	<li>
	<p align="justify"><a href="https://www.nist.gov/document/ai-risk-management-framework-2nd-draft">Second draft of the AI Risk Management Framework</a> (18/08/2022)</p>
	</li>
	<li>
	<p align="justify"><a href="https://www.nist.gov/document/ai-risk-management-framework-initial-draft">Initial draft of the AI Risk Management Framework</a> (17/03/2022)</p>
	</li>
	<li>
	<p align="justify"><a href="https://www.nist.gov/document/airmfconceptpaper">Concept paper to help guide development of the AI Risk Management Framework</a> (13/12/2021)</p>
	</li>
	<li>
	<p align="justify"><a href="https://www.nist.gov/document/summary-analysis-responses-nist-artificial-intelligence-risk-management-framework-ai-rmf">Brief summary</a> of <a href="https://www.nist.gov/itl/ai-risk-management-framework/comments-received-rfi-artificial-intelligence-risk-management">responses</a> to the 29/07/2021, RFI (15/10/2021)</p>
	</li>
	<li>
	<p align="justify"><a href="https://www.nist.gov/document/draft-taxonomy-ai-risk-october-15-2021">Draft -Taxonomy of AI Risk</a> (15/10/2021)</p>
	</li>
	<li>
	<p align="justify"><a href="https://www.federalregister.gov/documents/2021/07/29/2021-16176/artificial-intelligence-risk-management-framework">AI Risk Management Framework Request for Information</a> (29/07/2021)</p>
	</li>
</ul>

<div class="image-center"><img alt="Dòng thời gian phát triển Khung Quản lý Rủi ro AI (AI RMF) của NIST" height="435" src="https://letrungnghia.mangvn.org/uploads/letrungnghia/news/2024/ai-rms_timeline.png" width="781" /></div>

<p align="justify"><span style="background: rgb(255, 255, 0);">On April 29, 2024, NIST released a draft publication based on the AI Risk Management Framework (AI RMF) to help manage the risk of Generative AI. The draft <a href="https://airc.nist.gov/docs/NIST.AI.600-1.GenAI-Profile.ipd.pdf">AI RMF Generative AI Profile</a> can help organizations identify unique risks posed by generative AI and proposes actions for generative AI risk management that best aligns with their goals and priorities. Developed over the past year and drawing on input from the NIST generative AI <a href="https://www.nist.gov/news-events/news/2023/06/biden-harris-administration-announces-new-nist-public-working-group-ai">public working group</a> of more than 2,500 members, the guidance centers on a list of 12 risks and more than 400 actions that developers can take to manage them. <a href="https://www.nist.gov/news-events/news/2024/04/department-commerce-announces-new-actions-implement-president-bidens">More information</a></span><a href="https://www.nist.gov/news-events/news/2024/04/department-commerce-announces-new-actions-implement-president-bidens"><u><span style="background: rgb(255, 255, 0);"> </span></u></a><a href="https://www.nist.gov/news-events/news/2024/04/department-commerce-announces-new-actions-implement-president-bidens"><span style="background: rgb(255, 255, 0);">is available here.</span></a></p>

<p align="justify"><span style="background: rgb(255, 255, 0);">On April 30, 2024, NIST posted a <a href="https://airc.nist.gov/docs/FINAL_Crosswalk1_Terminology_RMF_GfB.pdf">crosswalk</a> between the NIST AI Risk Management Framework (AI RMF)</span><b><span style="background: rgb(255, 255, 0);"> </span></b><span style="background: rgb(255, 255, 0);">and the Japan AI Guidelines for Business (AI GfB).</span></p>

<p align="justify"><span style="background: rgb(255, 255, 0);">In collaboration with the private and public sectors, NIST has developed a framework to better manage risks to individuals, organizations, and society associated with artificial intelligence (AI). The <a href="https://doi.org/10.6028/NIST.AI.100-1">NIST AI Risk Management Framework (AI RMF)</a> is intended for voluntary use and to improve the ability to incorporate trustworthiness considerations into the design, development, use, and evaluation of AI products, services, and systems.</span></p>

<p align="justify"><span style="background: rgb(255, 255, 0);">Released on January 26, 2023, the Framework was developed through a consensus-driven, open, transparent, and collaborative process that included a Request for Information, several draft versions for public comments, multiple <a href="https://www.nist.gov/itl/ai-risk-management-framework/ai-risk-management-framework-workshops-events">workshops</a>, and other opportunities to provide input. It is intended to build on, align with, and support AI risk management efforts by others.</span></p>

<p align="justify"><span style="background: rgb(255, 255, 0);">A companion <a href="https://airc.nist.gov/AI_RMF_Knowledge_Base/Playbook">NIST AI RMF Playbook</a> also has been published by NIST along with an <a href="https://www.nist.gov/itl/ai-risk-management-framework/roadmap-nist-artificial-intelligence-risk-management-framework-ai">AI RMF Roadmap</a>, <a href="https://www.nist.gov/itl/ai-risk-management-framework/crosswalks-nist-artificial-intelligence-risk-management-framework">AI RMF Crosswalk</a>, and various <a href="https://www.nist.gov/itl/ai-risk-management-framework/perspectives-about-nist-artificial-intelligence-risk-management">Perspectives</a>. In addition, NIST is making available a <a href="https://www.nist.gov/video/introduction-nist-ai-risk-management-framework-ai-rmf-10-explainer-video">video explainer</a> about the AI RMF.</span></p>

<p align="justify"><span style="background: rgb(255, 255, 0);">On March 30, 2023, NIST launched the <a href="https://airc.nist.gov/Home">Trustworthy and Responsible AI Resource Center</a>, which will facilitate implementation of, and international alignment with, the AI RMF.</span></p>

<p align="justify"><span style="background: rgb(255, 255, 0);">To view public comments received on the previous drafts of the AI RMF and Requests for Information, see the <a href="https://www.nist.gov/itl/ai-risk-management-framework/ai-rmf-development">AI RMF Development</a> page.</span></p>

<p align="justify"><strong><span style="background: rgb(255, 255, 0);">Prior Documents</span></strong></p>

<p align="justify">Dịch: Lê Trung Nghĩa</p>

<p align="justify">letrungnghia.foss@gmail.com</p>
&nbsp;
		</div>
				<div id="author">
						<p>
				<strong>Tác giả:</strong>
				<a href="https://letrungnghia.mangvn.org/author/Nghia-Le-Trung/">Nghĩa Lê Trung</a>
			</p>
		</div>
	</div>
	<div id="footer" class="clearfix">
		<div id="url">
			<strong>URL của bản tin này: </strong><a href="https://letrungnghia.mangvn.org/savefile/Education/khung-quan-ly-rui-ro-ai-7467.html" title="Khung quản lý rủi ro AI">https://letrungnghia.mangvn.org/savefile/Education/khung-quan-ly-rui-ro-ai-7467.html</a>

		</div>
		<div class="clear"></div>
		<div class="copyright">
			&copy; Blog FOSS by Lê Trung Nghĩa
		</div>
		<div id="contact">
			<a href="mailto:webmaster@vinades.vn">webmaster@vinades.vn</a>
		</div>
	</div>
</div>
        <div id="timeoutsess" class="chromeframe">
            Bạn đã không sử dụng Site, <a onclick="timeoutsesscancel();" href="https://letrungnghia.mangvn.org/#">Bấm vào đây để duy trì trạng thái đăng nhập</a>. Thời gian chờ: <span id="secField"> 60 </span> giây
        </div>
        <div id="openidResult" class="nv-alert" style="display:none"></div>
        <div id="openidBt" data-result="" data-redirect=""></div>
<div id="run_cronjobs" style="visibility:hidden;display:none;"><img alt="cron" src="/index.php?second=cronjobs&amp;p=k6ZwuIiQ" width="1" height="1" /></div>
<script src="https://letrungnghia.mangvn.org/assets/js/jquery/jquery.min.js"></script>
<script>var nv_base_siteurl="/",nv_lang_data="vi",nv_lang_interface="vi",nv_name_variable="nv",nv_fc_variable="op",nv_lang_variable="language",nv_module_name="news",nv_func_name="savefile",nv_is_user=0, nv_my_ofs=7,nv_my_abbr="+07",nv_cookie_prefix="nv4c_Cgoz2",nv_check_pass_mstime=1738000,nv_area_admin=0,nv_safemode=0,theme_responsive=0,nv_recaptcha_ver=2,nv_recaptcha_sitekey="6LcNwC8UAAAAAMm8ZTYNygweLUQtOU0IapbDRk69",nv_recaptcha_type="image",XSSsanitize=1;</script>
<script src="https://letrungnghia.mangvn.org/assets/js/language/vi.js"></script>
<script src="https://letrungnghia.mangvn.org/assets/js/DOMPurify/purify3.js"></script>
<script src="https://letrungnghia.mangvn.org/assets/js/global.js"></script>
<script src="https://letrungnghia.mangvn.org/assets/js/site.js"></script>
<script src="https://letrungnghia.mangvn.org/themes/default/js/news.js"></script>
<script src="https://letrungnghia.mangvn.org/themes/default/js/main.js"></script>
<script src="https://letrungnghia.mangvn.org/themes/default/js/custom.js"></script>
<script type="application/ld+json">
        {
            "@context": "https://schema.org",
            "@type": "Organization",
            "url": "https://letrungnghia.mangvn.org",
            "logo": "https://letrungnghia.mangvn.org/uploads/letrungnghia/le-trung-nghia.png"
        }
        </script>
<script src="https://letrungnghia.mangvn.org/themes/default/js/bootstrap.min.js"></script>
</body>
</html>