Winter Break Study Notes 1.20
1. Network Protocol Stack Implementation
-
Network protocol stack architecture
python
class NetworkProtocolStack:
"""网络协议栈"""
    def __init__(self):
self.layers = {
'physical': PhysicalLayer(),
'data_link': DataLinkLayer(),
'network': NetworkLayer(),
'transport': TransportLayer(),
'application': ApplicationLayer()
        }
        # 协议注册表
        self.protocols = {
            'ARP': ARPProtocol(),
            'IP': IPProtocol(),
            'ICMP': ICMPProtocol(),
            'UDP': UDPProtocol(),
            'TCP': TCPProtocol(),
        }
        # 网络接口
        self.interfaces = {}
        # 路由表
        self.routing_table = RoutingTable()
        # 连接表
        self.connection_table = {}

    def send(self, data, dest_ip, dest_port, src_port=0, protocol='TCP'):
"""发送数据"""
# 应用层 -> 传输层 -> 网络层 -> 数据链路层 -> 物理层
transport_data = self.layers['transport'].prepare(
            data, dest_port, src_port, protocol)
        network_data = self.layers['network'].prepare(
            transport_data, dest_ip, protocol)
        # 查找下一跳地址
        next_hop = self.routing_table.lookup(dest_ip)
        data_link_data = self.layers['data_link'].prepare(
            network_data, next_hop.mac_address if next_hop else None)
        physical_data = self.layers['physical'].prepare(data_link_data)
        # 通过物理层发送
        interface = self.get_interface_for_ip(dest_ip)
        if interface:
            interface.send(physical_data)
        return True

    def receive(self, raw_data, interface):
"""接收数据"""
# 物理层 -> 数据链路层 -> 网络层 -> 传输层 -> 应用层
        data_link_frame = self.layers['physical'].parse(raw_data)
        if not data_link_frame:
            return None
        # 检查MAC地址
        if not self.layers['data_link'].check_destination(data_link_frame):
            return None
        network_packet = self.layers['data_link'].parse(data_link_frame)
        if not network_packet:
            return None
        # 检查IP地址
        if not self.layers['network'].check_destination(network_packet):
            return None
        transport_segment = self.layers['network'].parse(network_packet)
        if not transport_segment:
            return None
        # 根据协议分发到不同处理函数
        protocol = transport_segment.get('protocol')
        if protocol in self.protocols:
            return self.protocols[protocol].handle(transport_segment)
        return None

    def add_interface(self, name, ip_address, mac_address, netmask):
"""添加网络接口"""
interface = NetworkInterface(name, ip_address, mac_address, netmask)
        self.interfaces[name] = interface
        # 添加到路由表
        network_addr = '.'.join(
            str(int(a) & int(b))
            for a, b in zip(ip_address.split('.'), netmask.split('.'))
        )
        self.routing_table.add_route(
            network_addr,   # 网络地址
            netmask,        # 子网掩码
            '0.0.0.0',      # 网关(直接路由)
            name,           # 接口
            0               # 度量值
        )
        # 添加默认路由(如果有)
        if ip_address.startswith('192.168.1'):
            self.routing_table.add_route(
                '0.0.0.0',       # 默认路由
                '0.0.0.0',
                '192.168.1.1',   # 默认网关
                name,
                1
            )
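The network address registered for the direct route is just the IP ANDed with the netmask, octet by octet. A self-contained sketch of that computation (sample address and mask chosen only for illustration):
python
# Bitwise-AND each octet of the address with the mask to get the network address.
ip, mask = '192.168.1.10', '255.255.255.0'
network = '.'.join(str(int(a) & int(b)) for a, b in zip(ip.split('.'), mask.split('.')))
print(network)  # 192.168.1.0
-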
TCP protocol implementation
python
import time

class TCPProtocol:
"""TCP协议实现"""
    def __init__(self):
self.state_machine = TCPStateMachine()
self.connections = {} # key: (local_ip, local_port, remote_ip, remote_port)
self.send_buffer = {}
self.receive_buffer = {}
        self.sequence_numbers = {}
        # TCP参数
        self.mss = 1460          # 最大段大小
        self.window_size = 65535
        self.timeout = 3000      # 3秒
        self.max_retries = 5
        # 计时器
        self.timers = {}

    def connect(self, local_ip, local_port, remote_ip, remote_port):
"""建立TCP连接(三次握手)"""
# 生成初始序列号
        init_seq = self.generate_sequence_number()
        # 创建连接记录
        conn_key = (local_ip, local_port, remote_ip, remote_port)
        self.connections[conn_key] = {
            'state': 'SYN_SENT',
            'local_seq': init_seq,
            'remote_seq': 0,
            'window': self.window_size,
            'remote_window': 0,
            'send_buffer': [],
            'receive_buffer': [],
            'last_ack': 0,
            'last_sent': init_seq,
        }
        # 发送SYN包
        syn_packet = self.create_packet(
            local_ip, local_port, remote_ip, remote_port,
            flags=['SYN'], sequence=init_seq, window=self.window_size
        )
        self.send_packet(syn_packet, conn_key)
        # 启动重传计时器
        self.start_timer(conn_key, 'syn_timeout')
        return conn_key

    def create_packet(self, src_ip, src_port, dst_ip, dst_port,
flags, sequence, ack=0, window=0, data=b''):
"""创建TCP包"""
packet = {
'src_ip': src_ip,
'src_port': src_port,
'dst_ip': dst_ip,
'dst_port': dst_port,
'flags': set(flags), # SYN, ACK, FIN, RST, PSH, URG
'sequence': sequence,
'acknowledgment': ack,
'window': window,
'urgent_pointer': 0,
'options': [],
'data': data,
'checksum': 0
        }
        # 计算校验和
        packet['checksum'] = self.calculate_checksum(packet)
        return packet

    def calculate_checksum(self, packet):
"""计算TCP校验和"""
# 伪头部
        pseudo_header = bytearray()
        # 源IP地址
        for part in packet['src_ip'].split('.'):
            pseudo_header.append(int(part))
        # 目的IP地址
        for part in packet['dst_ip'].split('.'):
            pseudo_header.append(int(part))
        # 协议类型和TCP长度
        pseudo_header.append(0)  # 保留
        pseudo_header.append(6)  # TCP协议号
        tcp_len = 20 + len(packet.get('options', [])) + len(packet['data'])
        pseudo_header.append((tcp_len >> 8) & 0xFF)
        pseudo_header.append(tcp_len & 0xFF)
        # TCP头部和数据
        tcp_header = bytearray()
        # 源端口和目的端口
        tcp_header.append((packet['src_port'] >> 8) & 0xFF)
        tcp_header.append(packet['src_port'] & 0xFF)
        tcp_header.append((packet['dst_port'] >> 8) & 0xFF)
        tcp_header.append(packet['dst_port'] & 0xFF)
        # 序列号
        seq = packet['sequence']
        tcp_header.extend(seq.to_bytes(4, 'big'))
        # 确认号
        ack = packet['acknowledgment']
        tcp_header.extend(ack.to_bytes(4, 'big'))
        # 数据偏移和标志位
        data_offset = 5  # 20字节头部,没有选项时为5
        if packet.get('options'):
            data_offset += len(packet['options']) // 4
        flags_byte = 0
        for flag in packet['flags']:
            if flag == 'FIN':
                flags_byte |= 0x01
            elif flag == 'SYN':
                flags_byte |= 0x02
            elif flag == 'RST':
                flags_byte |= 0x04
            elif flag == 'PSH':
                flags_byte |= 0x08
            elif flag == 'ACK':
                flags_byte |= 0x10
            elif flag == 'URG':
                flags_byte |= 0x20
        tcp_header.append((data_offset << 4) & 0xF0)
        tcp_header.append(flags_byte)
        # 窗口大小
        tcp_header.append((packet['window'] >> 8) & 0xFF)
        tcp_header.append(packet['window'] & 0xFF)
        # 校验和(先填0)和紧急指针
        tcp_header.extend([0, 0])  # 校验和占位
        tcp_header.extend([0, 0])  # 紧急指针
        # 选项(如果有)
        if packet.get('options'):
            tcp_header.extend(packet['options'])
        # 数据
        data = packet.get('data', b'')
        if data:
            tcp_header.extend(data)
        # 补零(如果需要)
        if len(tcp_header) % 2 == 1:
            tcp_header.append(0)
        # 计算校验和(简化版)
        total = 0
        for i in range(0, len(pseudo_header), 2):
            if i + 1 < len(pseudo_header):
                word = (pseudo_header[i] << 8) | pseudo_header[i + 1]
                total += word
        for i in range(0, len(tcp_header), 2):
            if i + 1 < len(tcp_header):
                word = (tcp_header[i] << 8) | tcp_header[i + 1]
                total += word
        # 折叠进位
        while total >> 16:
            total = (total & 0xFFFF) + (total >> 16)
        # 取反
        checksum = ~total & 0xFFFF
        return checksum

    def handle_packet(self, packet):
"""处理收到的TCP包"""
# 验证校验和
if not self.validate_checksum(packet):
            return
        # 查找连接
        conn_key = (packet['dst_ip'], packet['dst_port'],
                    packet['src_ip'], packet['src_port'])
        reverse_key = (packet['src_ip'], packet['src_port'],
                       packet['dst_ip'], packet['dst_port'])
        # 检查连接是否存在
        connection = None
        if conn_key in self.connections:
            connection = self.connections[conn_key]
        elif reverse_key in self.connections:
            # 可能是反向连接
            connection = self.connections[reverse_key]
        if connection:
            # 更新状态机
            self.state_machine.process(connection, packet)
        else:
            # 新连接
            if 'SYN' in packet['flags'] and 'ACK' not in packet['flags']:
                # 被动打开
                self.passive_open(packet)

    def passive_open(self, packet):
"""被动打开连接(服务器端)"""
local_ip = packet['dst_ip']
local_port = packet['dst_port']
remote_ip = packet['src_ip']
        remote_port = packet['src_port']
        # 检查端口是否监听
        if not self.is_port_listening(local_port):
            # 发送RST
            rst_packet = self.create_packet(
                local_ip, local_port, remote_ip, remote_port,
                flags=['RST'], sequence=0, ack=packet['sequence'] + 1
            )
            self.send_packet(rst_packet, None)
            return
        # 生成初始序列号
        init_seq = self.generate_sequence_number()
        # 创建连接记录
        conn_key = (local_ip, local_port, remote_ip, remote_port)
        self.connections[conn_key] = {
            'state': 'SYN_RECEIVED',
            'local_seq': init_seq,
            'remote_seq': packet['sequence'] + 1,
            'window': self.window_size,
            'remote_window': packet['window'],
            'send_buffer': [],
            'receive_buffer': [],
            'last_ack': packet['sequence'] + 1,
            'last_sent': init_seq,
        }
        # 发送SYN-ACK
        syn_ack_packet = self.create_packet(
            local_ip, local_port, remote_ip, remote_port,
            flags=['SYN', 'ACK'],
            sequence=init_seq,
            ack=packet['sequence'] + 1,
            window=self.window_size
        )
        self.send_packet(syn_ack_packet, conn_key)

    def send_data(self, conn_key, data):
"""发送数据"""
if conn_key not in self.connections:
            raise ConnectionError("连接不存在")
        connection = self.connections[conn_key]
        if connection['state'] != 'ESTABLISHED':
            raise ConnectionError("连接未建立")
        # 分割数据以适应MSS
        chunks = [data[i:i+self.mss] for i in range(0, len(data), self.mss)]
        for chunk in chunks:
            # 等待窗口空间
            while True:
                window_available = connection['remote_window'] - (
                    connection['last_sent'] - connection['last_ack'])
                if window_available >= len(chunk):
                    break
                # 窗口不足,等待
                time.sleep(0.001)
            # 发送数据包
            packet = self.create_packet(
                conn_key[0], conn_key[1], conn_key[2], conn_key[3],
                flags=['ACK', 'PSH'],
                sequence=connection['last_sent'],
                ack=connection['remote_seq'],
                window=connection['window'],
                data=chunk
            )
            self.send_packet(packet, conn_key)
            # 更新发送状态
            connection['last_sent'] += len(chunk)
            # 添加到重传队列
            self.add_to_retransmission_queue(conn_key, packet)
            # 启动重传计时器
            self.start_timer(conn_key, 'data_timeout')
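The last step of `calculate_checksum` is the standard Internet one's-complement sum: add the buffer up as 16-bit words, fold any carry back into the low 16 bits, then invert. In isolation, with a few made-up header words, the folding looks like this:
python
# 16-bit one's-complement sum with end-around carry, as used by the TCP checksum.
words = [0x4500, 0x0034, 0xac10, 0x0a63]    # sample 16-bit words, illustration only
total = sum(words)
while total >> 16:                          # fold carries back into the low 16 bits
    total = (total & 0xFFFF) + (total >> 16)
print(hex(~total & 0xFFFF))                 # 0x458
-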
Sliding window and congestion control
python
import time

class TCPSlidingWindow:
"""TCP滑动窗口管理"""
    def __init__(self, max_window_size=65535):
self.max_window_size = max_window_size
self.send_window = {
'left': 0, # 窗口左边界(已确认)
'right': 0, # 窗口右边界(可发送)
'size': 0, # 当前窗口大小
'sent': {}, # 已发送未确认的数据
'acked': set() # 已确认的序列号
        }
        self.receive_window = {
            'left': 0,            # 期望接收的序列号
            'right': 0,           # 可接收的右边界
            'size': self.max_window_size,
            'received': {},       # 已接收的数据
            'out_of_order': {}    # 乱序到达的数据
        }
        # 拥塞控制
        self.congestion_window = 1   # MSS的数量
        self.ssthresh = 65535        # 慢启动阈值
        self.duplicate_acks = 0      # 重复ACK计数
        self.rtt = 100               # 初始RTT估计(ms)
        self.rtt_var = 50            # RTT变化估计
        self.timeout = 3000          # 重传超时
        # TCP Tahoe/Reno算法状态
        self.congestion_state = 'SLOW_START'  # SLOW_START, CONGESTION_AVOIDANCE, FAST_RECOVERY

    def update_send_window(self, ack_number, window_advertisement):
"""更新发送窗口"""
# 移除已确认的数据
seq_numbers = list(self.send_window['sent'].keys())
for seq in seq_numbers:
if seq < ack_number:
del self.send_window['sent'][seq]
                self.send_window['acked'].add(seq)
        # 更新窗口左边界
        self.send_window['left'] = ack_number
        # 更新窗口大小
        self.send_window['size'] = min(
            window_advertisement, self.congestion_window
        )
        # 更新窗口右边界
        self.send_window['right'] = (
            self.send_window['left'] + self.send_window['size']
        )

    def can_send(self, data_size):
"""检查是否可以发送数据"""
available = self.send_window['right'] - self.send_window['left']
        return available >= data_size

    def mark_sent(self, sequence_number, data):
"""标记数据已发送"""
self.send_window['sent'][sequence_number] = {
'data': data,
'time_sent': time.time(),
'retransmit_count': 0
        }

    def handle_ack(self, ack_number):
"""处理ACK"""
# 如果是新的ACK(不是重复的)
if ack_number > self.send_window['left']:
            self.duplicate_acks = 0
            # 更新拥塞窗口
            if self.congestion_state == 'SLOW_START':
                self.congestion_window += 1
                if self.congestion_window >= self.ssthresh:
                    self.congestion_state = 'CONGESTION_AVOIDANCE'
            elif self.congestion_state == 'CONGESTION_AVOIDANCE':
                self.congestion_window += 1 / self.congestion_window
            elif self.congestion_state == 'FAST_RECOVERY':
                self.congestion_window = self.ssthresh
                self.congestion_state = 'CONGESTION_AVOIDANCE'
        else:
            # 重复ACK
            self.duplicate_acks += 1
            if self.duplicate_acks == 3:
                # 快速重传
                self.fast_retransmit()
                self.congestion_state = 'FAST_RECOVERY'
                self.ssthresh = max(self.congestion_window // 2, 2)
                self.congestion_window = self.ssthresh + 3

    def fast_retransmit(self):
"""快速重传"""
# 找到需要重传的包
        earliest_seq = min(self.send_window['sent'].keys())
        if earliest_seq in self.send_window['sent']:
            packet_info = self.send_window['sent'][earliest_seq]
            # 触发重传
            self.retransmit_packet(earliest_seq, packet_info['data'])

    def handle_timeout(self):
"""处理超时"""
# 超时重传
self.ssthresh = max(self.congestion_window // 2, 2)
self.congestion_window = 1
self.congestion_state = 'SLOW_START'
        self.duplicate_acks = 0
        # 重传所有未确认的数据
        for seq, info in self.send_window['sent'].items():
            self.retransmit_packet(seq, info['data'])

    def update_receive_window(self, sequence_number, data):
"""更新接收窗口"""
        expected_seq = self.receive_window['left']
        if sequence_number == expected_seq:
            # 按序到达
            self.receive_window['received'][sequence_number] = data
            self.receive_window['left'] += len(data)
            # 检查是否有乱序数据可以合并
            while self.receive_window['left'] in self.receive_window['out_of_order']:
                data = self.receive_window['out_of_order'].pop(self.receive_window['left'])
                self.receive_window['received'][self.receive_window['left']] = data
                self.receive_window['left'] += len(data)
        elif sequence_number > expected_seq:
            # 乱序到达
            if sequence_number < self.receive_window['right']:
                self.receive_window['out_of_order'][sequence_number] = data
        # 更新窗口右边界
        self.receive_window['right'] = (
            self.receive_window['left'] + self.receive_window['size']
        )
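A short driver makes the Tahoe/Reno state transitions above visible. This is just a sketch that feeds in-order ACKs into `handle_ack`; `ssthresh` is lowered so the switch out of slow start shows up quickly:
python
win = TCPSlidingWindow()
win.ssthresh = 8                         # small threshold so the transition is visible

for ack in range(1, 13):                 # pretend 12 new in-order ACKs arrive
    win.handle_ack(ack)                  # +1 per ACK in SLOW_START,
    win.update_send_window(ack, 65535)   # then roughly +1/cwnd per ACK afterwards
    print(ack, win.congestion_state, round(win.congestion_window, 2))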
2. Security Mechanisms and Permission Management
-
Access control lists (ACL)
python
class AccessControlList:
"""访问控制列表"""
    def __init__(self):
self.entries = []
        self.default_policy = 'DENY'  # 默认拒绝

    def add_rule(self, subject, object, action, permission):
"""添加规则"""
rule = {
'subject': subject, # 用户/进程
'object': object, # 文件/资源
'action': action, # read/write/execute
'permission': permission, # ALLOW/DENY
'priority': len(self.entries) # 优先级
        }
        self.entries.append(rule)

    def check_permission(self, subject, object, action):
"""检查权限"""
# 查找匹配的规则
        matching_rules = []
        for rule in self.entries:
            if self.rule_matches(rule, subject, object, action):
                matching_rules.append(rule)
        # 按优先级排序
        matching_rules.sort(key=lambda x: x['priority'])
        # 应用第一条匹配的规则
        if matching_rules:
            return matching_rules[0]['permission'] == 'ALLOW'
        # 默认策略
        return self.default_policy == 'ALLOW'

    def rule_matches(self, rule, subject, object, action):
"""检查规则是否匹配"""
# 检查主体
if rule['subject'] != '*' and rule['subject'] != subject:
            return False
        # 检查客体
        if rule['object'] != '*' and rule['object'] != object:
            return False
        # 检查动作
        if rule['action'] != '*' and rule['action'] != action:
            return False
        return True
class CapabilitySystem:
"""能力(Capability)系统"""
    def __init__(self):
self.capabilities = {}
# 标准能力定义
self.standard_caps = {
'CAP_CHOWN': 0x00000001, # 改变文件所有者
'CAP_DAC_OVERRIDE': 0x00000002, # 忽略DAC访问限制
'CAP_DAC_READ_SEARCH': 0x00000004, # 忽略DAC读/搜索限制
'CAP_FOWNER': 0x00000008, # 忽略文件所有者检查
'CAP_FSETID': 0x00000010, # 设置setuid位
'CAP_KILL': 0x00000020, # 发送信号
'CAP_SETGID': 0x00000040, # 设置组ID
'CAP_SETUID': 0x00000080, # 设置用户ID
'CAP_SETPCAP': 0x00000100, # 设置能力
'CAP_NET_BIND_SERVICE': 0x00000200, # 绑定低端口
'CAP_NET_RAW': 0x00000400, # 原始套接字
'CAP_SYS_ADMIN': 0x00000800, # 系统管理
'CAP_SYS_CHROOT': 0x00001000, # chroot
}
def grant_capability(self, process_id, capability):
"""授予能力"""
if process_id not in self.capabilities:
self.capabilities[process_id] = set()
self.capabilities[process_id].add(capability)
def revoke_capability(self, process_id, capability):
"""撤销能力"""
if process_id in self.capabilities:
self.capabilities[process_id].discard(capability)
def check_capability(self, process_id, capability):
"""检查能力"""
if process_id not in self.capabilities:
return False
# 检查具体能力
if capability in self.capabilities[process_id]:
return True
# 检查特殊能力(如CAP_SYS_ADMIN可能包含其他能力)
if 'CAP_SYS_ADMIN' in self.capabilities[process_id]:
return True
return False
def drop_privileges(self, process_id):
"""删除特权"""
if process_id in self.capabilities:
# 保留必要的非特权能力
allowed_caps = {'CAP_NET_BIND_SERVICE'} # 示例:允许绑定低端口
self.capabilities[process_id] = (
self.capabilities[process_id] & allowed_caps
)
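A quick comparison of the two models above, using made-up subjects, paths and PIDs purely to exercise the classes:
python
acl = AccessControlList()
acl.add_rule('alice', '/etc/passwd', 'read', 'ALLOW')
acl.add_rule('*', '/etc/shadow', '*', 'DENY')
print(acl.check_permission('alice', '/etc/passwd', 'read'))  # True
print(acl.check_permission('alice', '/etc/shadow', 'read'))  # False, explicit DENY
print(acl.check_permission('bob', '/etc/passwd', 'write'))   # False, default policy

caps = CapabilitySystem()
caps.grant_capability(1234, 'CAP_NET_BIND_SERVICE')
print(caps.check_capability(1234, 'CAP_NET_BIND_SERVICE'))   # True
caps.drop_privileges(1234)
print(caps.check_capability(1234, 'CAP_SYS_ADMIN'))          # False after dropping privileges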
-
SELinux-style security module
python
class SecurityEnhancedLinux:
"""SELinux风格的安全模块"""
    def __init__(self):
# 安全上下文数据库
        self.contexts = {}
        # 策略规则
        self.policies = []
        # 当前模式
        self.mode = 'ENFORCING'  # ENFORCING, PERMISSIVE, DISABLED
        # 加载默认策略
        self.load_default_policy()

    def load_default_policy(self):
"""加载默认策略"""
# 系统策略
self.add_policy('system_u', 'system_r', 'kernel_t', 'kernel_t', '', 'ALLOW')
        self.add_policy('system_u', 'system_r', 'init_t', 'init_t', '', 'ALLOW')
        # 用户策略
        self.add_policy('user_u', 'user_r', 'user_home_t', 'user_home_t', 'read write', 'ALLOW')
        self.add_policy('user_u', 'user_r', 'etc_t', 'etc_t', 'read', 'ALLOW')
        # 网络策略
        self.add_policy('user_u', 'user_r', 'port_t', 'tcp_socket', 'name_bind', 'ALLOW', ports=(1024, 65535))
        self.add_policy('system_u', 'system_r', 'port_t', 'tcp_socket', 'name_bind', 'ALLOW', ports=(0, 1023))

    def add_policy(self, user, role, source_type, target_type, permission, decision, **kwargs):
"""添加策略规则"""
policy = {
'user': user,
'role': role,
'source_type': source_type,
'target_type': target_type,
'permission': permission,
'decision': decision,
'constraints': kwargs
        }
        self.policies.append(policy)

    def get_context(self, identifier):
"""获取安全上下文"""
if identifier in self.contexts:
            return self.contexts[identifier]
        # 默认上下文
        if identifier.startswith('/home/'):
            return {'user': 'user_u', 'role': 'user_r', 'type': 'user_home_t'}
        elif identifier.startswith('/etc/'):
            return {'user': 'system_u', 'role': 'object_r', 'type': 'etc_t'}
        elif identifier.startswith('/bin/') or identifier.startswith('/usr/bin/'):
            return {'user': 'system_u', 'role': 'object_r', 'type': 'bin_t'}
        else:
            return {'user': 'system_u', 'role': 'object_r', 'type': 'unlabeled_t'}

    def check_access(self, source_id, target_id, permission):
"""检查访问权限"""
if self.mode == 'DISABLED':
            return True
        # 获取安全上下文
        source_ctx = self.get_context(source_id)
        target_ctx = self.get_context(target_id)
        # 查找匹配的策略
        for policy in self.policies:
            if self.policy_matches(policy, source_ctx, target_ctx, permission):
                if self.mode == 'ENFORCING':
                    return policy['decision'] == 'ALLOW'
                else:
                    # PERMISSIVE模式
                    if policy['decision'] == 'DENY':
                        print(f"SELinux警告: 在PERMISSIVE模式下拒绝了访问")
                    return True
        # 默认拒绝
        if self.mode == 'ENFORCING':
            return False
        else:
            print(f"SELinux警告: 在PERMISSIVE模式下默认拒绝了访问")
            return True

    def policy_matches(self, policy, source_ctx, target_ctx, permission):
"""检查策略是否匹配"""
# 检查用户
if policy['user'] != '*' and policy['user'] != source_ctx['user']:
            return False
        # 检查角色
        if policy['role'] != '*' and policy['role'] != source_ctx['role']:
            return False
        # 检查源类型
        if policy['source_type'] != '*' and policy['source_type'] != source_ctx['type']:
            return False
        # 检查目标类型
        if policy['target_type'] != '*' and policy['target_type'] != target_ctx['type']:
            return False
        # 检查权限
        if policy['permission'] != '*' and permission not in policy['permission'].split():
            return False
        # 检查其他约束
        for key, value in policy['constraints'].items():
            if key == 'ports':
                # 检查端口范围
                pass
        return True
-
Encryption and key management
python
import os

class CryptoSystem:
"""加密系统"""
    def __init__(self):
self.algorithms = {
'AES-256-CBC': AES256CBC(),
'RSA-2048': RSA2048(),
'SHA-256': SHA256(),
'HMAC-SHA256': HMACSHA256(),
        }
        self.key_store = KeyStore()
        self.random = SecureRandom()

    def encrypt_file(self, file_path, key_id, algorithm='AES-256-CBC'):
"""加密文件"""
# 读取文件
with open(file_path, 'rb') as f:
            plaintext = f.read()
        # 获取密钥
        key = self.key_store.get_key(key_id)
        if not key:
            raise KeyError(f"密钥不存在: {key_id}")
        # 生成IV(初始化向量)
        iv = self.random.generate_bytes(16)
        # 加密
        cipher = self.algorithms[algorithm]
        ciphertext = cipher.encrypt(plaintext, key, iv)
        # 保存加密文件(IV + 密文)
        encrypted_data = iv + ciphertext
        with open(file_path + '.enc', 'wb') as f:
            f.write(encrypted_data)
        return True

    def decrypt_file(self, encrypted_path, key_id, algorithm='AES-256-CBC'):
"""解密文件"""
# 读取加密文件
with open(encrypted_path, 'rb') as f:
            encrypted_data = f.read()
        # 提取IV和密文
        iv = encrypted_data[:16]
        ciphertext = encrypted_data[16:]
        # 获取密钥
        key = self.key_store.get_key(key_id)
        if not key:
            raise KeyError(f"密钥不存在: {key_id}")
        # 解密
        cipher = self.algorithms[algorithm]
        plaintext = cipher.decrypt(ciphertext, key, iv)
        # 保存解密文件
        decrypted_path = encrypted_path.replace('.enc', '.dec')
        with open(decrypted_path, 'wb') as f:
            f.write(plaintext)
        return True
class AES256CBC:
"""AES-256-CBC加密算法(简化实现)"""
    def __init__(self):
self.block_size = 16 # AES块大小
def encrypt(self, plaintext, key, iv):
"""加密"""
# 填充数据
padded = self.pad(plaintext)
# 分块加密
ciphertext = bytearray()
prev_block = iv
for i in range(0, len(padded), self.block_size):
block = padded[i:i + self.block_size]
# XOR with previous block (CBC模式)
xored = self.xor_bytes(block, prev_block)
# 加密(简化:实际使用AES算法)
encrypted = self.simple_encrypt(xored, key)
ciphertext.extend(encrypted)
prev_block = encrypted
return bytes(ciphertext)
def decrypt(self, ciphertext, key, iv):
"""解密"""
# 分块解密
plaintext = bytearray()
prev_block = iv
for i in range(0, len(ciphertext), self.block_size):
block = ciphertext[i:i + self.block_size]
# 解密(简化)
decrypted = self.simple_decrypt(block, key)
# XOR with previous block
xored = self.xor_bytes(decrypted, prev_block)
plaintext.extend(xored)
prev_block = block
# 移除填充
unpadded = self.unpad(bytes(plaintext))
return unpadded
def pad(self, data):
"""PKCS#7填充"""
pad_len = self.block_size - (len(data) % self.block_size)
padding = bytes([pad_len] * pad_len)
return data + padding
def unpad(self, data):
"""移除PKCS#7填充"""
pad_len = data[-1]
return data[:-pad_len]
def xor_bytes(self, a, b):
"""字节异或"""
return bytes(x ^ y for x, y in zip(a, b))
def simple_encrypt(self, data, key):
"""简化加密(实际应使用真正的AES)"""
# 注意:这只是一个示例,不是真正的AES加密
encrypted = bytearray()
for i, byte in enumerate(data):
encrypted.append((byte + key[i % len(key)]) % 256)
return bytes(encrypted)
def simple_decrypt(self, data, key):
"""简化解密"""
decrypted = bytearray()
for i, byte in enumerate(data):
decrypted.append((byte - key[i % len(key)]) % 256)
return bytes(decrypted)
class KeyStore:
"""密钥存储"""
    def __init__(self):
self.keys = {}
self.master_key = None
def generate_key(self, key_id, algorithm='AES-256'):
"""生成密钥"""
if algorithm == 'AES-256':
key = os.urandom(32) # 256位
elif algorithm == 'RSA-2048':
# 生成RSA密钥对(简化)
key = self.generate_rsa_keypair()
else:
raise ValueError(f"不支持的算法: {algorithm}")
# 加密存储
encrypted_key = self.encrypt_key(key)
self.keys[key_id] = encrypted_key
return key_id
def get_key(self, key_id):
"""获取密钥"""
if key_id not in self.keys:
return None
encrypted_key = self.keys[key_id]
return self.decrypt_key(encrypted_key)
def encrypt_key(self, key):
"""加密密钥(使用主密钥)"""
if not self.master_key:
raise RuntimeError("主密钥未设置")
# 使用主密钥加密
iv = os.urandom(16)
cipher = AES256CBC()
encrypted = cipher.encrypt(key, self.master_key, iv)
return iv + encrypted
def decrypt_key(self, encrypted_key):
"""解密密钥"""
if not self.master_key:
raise RuntimeError("主密钥未设置")
# 提取IV和密文
iv = encrypted_key[:16]
ciphertext = encrypted_key[16:]
# 解密
cipher = AES256CBC()
return cipher.decrypt(ciphertext, self.master_key, iv)
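The two concrete pieces above, the toy AES256CBC and the KeyStore, can be wired together directly. A sketch: the master key has to be set before any key can be generated, and the cipher here is the simplified stand-in, not real AES.
python
import os

store = KeyStore()
store.master_key = os.urandom(32)            # required before generate_key/get_key
key_id = store.generate_key('file-key-1', 'AES-256')

cipher = AES256CBC()
key = store.get_key(key_id)                  # decrypted back with the master key
iv = os.urandom(16)
ct = cipher.encrypt(b'secret config data', key, iv)
assert cipher.decrypt(ct, key, iv) == b'secret config data'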
3. Performance Optimization and Tuning
-
Memory optimization techniques
python
class MemoryOptimizer:
"""内存优化器"""
    def __init__(self):
self.pool_allocators = {}
self.slab_allocators = {}
        self.page_coloring = PageColoring()

    def create_pool_allocator(self, name, object_size, pool_size=1000):
"""创建对象池分配器"""
allocator = PoolAllocator(object_size, pool_size)
self.pool_allocators[name] = allocator
        return allocator

    def create_slab_allocator(self, name, sizes):
"""创建Slab分配器"""
allocator = SlabAllocator(sizes)
self.slab_allocators[name] = allocator
return allocator
class PoolAllocator:
"""对象池分配器"""
    def __init__(self, object_size, pool_size=1000):
self.object_size = object_size
self.pool_size = pool_size
self.free_list = []
self.pools = []
# 预分配第一个池
self.allocate_pool()
def allocate(self):
"""分配对象"""
if not self.free_list:
self.allocate_pool()
return self.free_list.pop()
def free(self, obj):
"""释放对象"""
self.free_list.append(obj)
def allocate_pool(self):
"""分配新的对象池"""
# 分配连续内存块
pool_size = self.object_size * self.pool_size
pool = bytearray(pool_size)
# 将对象添加到空闲列表
for i in range(0, pool_size, self.object_size):
obj_ptr = id(pool) + i # 简化:使用ID作为指针
self.free_list.append(obj_ptr)
self.pools.append(pool)
class SlabAllocator:
"""Slab分配器(Linux内核风格)"""
    def __init__(self, sizes):
self.sizes = sorted(sizes)
self.slabs = {}
# 为每种大小创建Slab缓存
for size in self.sizes:
self.slabs[size] = SlabCache(size)
def allocate(self, size):
"""分配内存"""
# 找到合适的大小
for slab_size in self.sizes:
if slab_size >= size:
return self.slabs[slab_size].allocate()
# 没有合适的大小,使用通用分配器
return bytearray(size)
def free(self, ptr, size):
"""释放内存"""
for slab_size in self.sizes:
if slab_size >= size:
if self.slabs[slab_size].contains(ptr):
self.slabs[slab_size].free(ptr)
return
# 不是从Slab分配的,由GC处理
class SlabCache:
"""Slab缓存"""
    def __init__(self, object_size):
self.object_size = object_size
self.slabs = []
self.free_list = []
# 创建第一个Slab
self.grow()
def grow(self):
"""增长Slab缓存"""
# 每个Slab包含多个对象
objects_per_slab = 512
slab_size = self.object_size * objects_per_slab
slab = {
'memory': bytearray(slab_size),
'free_count': objects_per_slab,
'bitmap': [True] * objects_per_slab # True表示空闲
}
self.slabs.append(slab)
# 将所有对象添加到空闲列表
for i in range(objects_per_slab):
obj_ptr = id(slab['memory']) + i * self.object_size
self.free_list.append((obj_ptr, slab, i))
def allocate(self):
"""从Slab分配对象"""
if not self.free_list:
self.grow()
obj_ptr, slab, index = self.free_list.pop()
# 更新位图
slab['bitmap'][index] = False
slab['free_count'] -= 1
return obj_ptr
def free(self, ptr):
"""释放对象回Slab"""
# 查找包含该指针的Slab
for slab in self.slabs:
slab_start = id(slab['memory'])
slab_end = slab_start + len(slab['memory'])
if slab_start <= ptr < slab_end:
# 计算对象索引
index = (ptr - slab_start) // self.object_size
# 更新位图
slab['bitmap'][index] = True
slab['free_count'] += 1
# 添加到空闲列表
self.free_list.append((ptr, slab, index))
return
raise ValueError("指针不属于任何Slab")
class PageColoring:
"""页面着色(缓存优化)"""
    def __init__(self, cache_size_kb=8192, cache_ways=16, page_size_kb=4):
self.cache_size = cache_size_kb * 1024
self.cache_ways = cache_ways
self.page_size = page_size_kb * 1024
# 计算缓存参数
self.cache_sets = self.cache_size // (self.cache_ways * self.page_size)
self.colors = self.cache_sets
# 页面到颜色的映射
self.page_colors = {}
# 每种颜色的页面列表
self.color_pages = {i: [] for i in range(self.colors)}
def get_color(self, page_address):
"""获取页面颜色"""
if page_address in self.page_colors:
return self.page_colors[page_address]
# 计算颜色
set_index = (page_address // self.page_size) % self.cache_sets
color = set_index % self.colors
self.page_colors[page_address] = color
self.color_pages[color].append(page_address)
return color
def allocate_colored_pages(self, count, preferred_color=None):
"""分配指定颜色的页面"""
if preferred_color is not None:
# 尝试分配首选颜色
pages = self.try_allocate_color(preferred_color, count)
if pages:
return pages
# 否则,使用循环分配以减少冲突
pages = []
for i in range(count):
color = (i % self.colors) if preferred_color is None else preferred_color
page = self.allocate_single_page(color)
if page:
pages.append(page)
return pages
def try_allocate_color(self, color, count):
"""尝试分配特定颜色的页面"""
pages = []
for _ in range(count):
if self.color_pages[color]:
page = self.color_pages[color].pop(0)
pages.append(page)
else:
# 分配新页面并着色
page = self.allocate_new_page()
self.page_colors[page] = color
self.color_pages[color].append(page)
pages.append(page)
return pages
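A small driver for the allocators above; the "pointers" here are the simplified `id()`-based handles the code uses, not real addresses:
python
slab = SlabAllocator([32, 64, 128, 256])
ptr = slab.allocate(50)                      # served from the 64-byte SlabCache
print(type(ptr), ptr is not None)

pool = PoolAllocator(object_size=128, pool_size=4)
objs = [pool.allocate() for _ in range(6)]   # the 5th allocation grows a second pool
for o in objs:
    pool.free(o)

coloring = PageColoring()
print(coloring.get_color(0x200000), coloring.get_color(0x201000))  # adjacent pages get different colors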
-
I/O optimization
python
import time

class IOOptimizer:
"""I/O优化器"""
    def __init__(self):
self.read_ahead = ReadAheadBuffer()
self.write_back = WriteBackCache()
        self.io_scheduler = IOScheduler()

    def optimize_read(self, file_path, offset, size):
"""优化读取操作"""
# 检查是否在预读缓冲区中
cached = self.read_ahead.check_cache(file_path, offset, size)
if cached:
            return cached
        # 合并相邻的读取请求
        merged_request = self.io_scheduler.merge_read_requests(file_path, offset, size)
        # 执行读取
        data = self.perform_read(merged_request)
        # 填充预读缓冲区
        self.read_ahead.fill_cache(file_path, offset, data)
        return data

    def optimize_write(self, file_path, offset, data):
"""优化写入操作"""
# 添加到写回缓存
        self.write_back.add_to_cache(file_path, offset, data)
        # 异步刷回磁盘
        if self.write_back.should_flush():
            self.write_back.flush_async()
        return len(data)
class ReadAheadBuffer:
"""预读缓冲区"""
    def __init__(self, cache_size_mb=100):
self.cache_size = cache_size_mb * 1024 * 1024
self.cache = {} # file_path -> {offset: data}
self.access_pattern = {} # 文件访问模式记录
self.lru = [] # LRU列表
def check_cache(self, file_path, offset, size):
"""检查缓存"""
if file_path not in self.cache:
return None
# 检查请求的数据是否完全在缓存中
file_cache = self.cache[file_path]
# 查找包含请求范围的缓存块
for cache_offset, cache_data in file_cache.items():
cache_end = cache_offset + len(cache_data)
request_end = offset + size
if cache_offset <= offset and cache_end >= request_end:
# 命中缓存,更新LRU
self.update_lru((file_path, cache_offset))
# 返回请求的部分
start_in_cache = offset - cache_offset
return cache_data[start_in_cache:start_in_cache + size]
return None
def fill_cache(self, file_path, offset, data):
"""填充缓存"""
if file_path not in self.cache:
self.cache[file_path] = {}
# 记录访问模式
self.record_access_pattern(file_path, offset, len(data))
# 预测下一次访问
next_offset = self.predict_next_offset(file_path, offset, len(data))
if next_offset is not None:
# 预读数据
prefetch_size = self.determine_prefetch_size(file_path)
self.prefetch_data(file_path, next_offset, prefetch_size)
# 添加到缓存
self.cache[file_path][offset] = data
# 更新LRU
self.update_lru((file_path, offset))
# 如果缓存太大,淘汰最旧的
self.evict_if_needed()
def record_access_pattern(self, file_path, offset, size):
"""记录访问模式"""
if file_path not in self.access_pattern:
self.access_pattern[file_path] = []
self.access_pattern[file_path].append({
'offset': offset,
'size': size,
'time': time.time()
})
# 只保留最近的记录
if len(self.access_pattern[file_path]) > 1000:
self.access_pattern[file_path] = self.access_pattern[file_path][-1000:]
def predict_next_offset(self, file_path, current_offset, current_size):
"""预测下一次访问偏移"""
if file_path not in self.access_pattern:
return None
patterns = self.access_pattern[file_path]
if len(patterns) < 2:
return current_offset + current_size
# 分析访问模式
last_pattern = patterns[-2]
current_pattern = patterns[-1]
# 如果是顺序访问
if (current_pattern['offset'] == last_pattern['offset'] + last_pattern['size'] or
abs(current_pattern['offset'] - (last_pattern['offset'] + last_pattern['size'])) < 4096):
# 预测为顺序访问
return current_offset + current_size
# 如果是随机访问,不预测
return None
def determine_prefetch_size(self, file_path):
"""确定预读大小"""
if file_path not in self.access_pattern:
return 64 * 1024 # 默认64KB
# 根据历史访问大小决定
sizes = [p['size'] for p in self.access_pattern[file_path][-10:]]
avg_size = sum(sizes) / len(sizes) if sizes else 0
# 限制在合理范围内
return min(max(avg_size * 2, 4 * 1024), 1024 * 1024) # 4KB到1MB之间
class WriteBackCache:
"""写回缓存"""
    def __init__(self, cache_size_mb=100, flush_interval=5):
self.cache_size = cache_size_mb * 1024 * 1024
self.flush_interval = flush_interval # 秒
self.cache = {} # file_path -> [(offset, data, dirty), ...]
self.dirty_pages = 0
self.last_flush = time.time()
def add_to_cache(self, file_path, offset, data):
"""添加到写回缓存"""
if file_path not in self.cache:
self.cache[file_path] = []
# 检查是否可以合并到现有缓存项
for i, (cache_offset, cache_data, dirty) in enumerate(self.cache[file_path]):
cache_end = cache_offset + len(cache_data)
new_end = offset + len(data)
# 如果有重叠或相邻
if (offset <= cache_end and new_end >= cache_offset) or \
(offset == cache_end or new_end == cache_offset):
# 合并数据
merged_data = self.merge_data(cache_offset, cache_data, offset, data)
self.cache[file_path][i] = (min(cache_offset, offset), merged_data, True)
self.dirty_pages += 1
return
# 添加新项
self.cache[file_path].append((offset, data, True))
self.dirty_pages += 1
# 按偏移排序
self.cache[file_path].sort(key=lambda x: x[0])
def should_flush(self):
"""检查是否应该刷回"""
# 检查时间间隔
if time.time() - self.last_flush > self.flush_interval:
return True
# 检查脏页数量
if self.dirty_pages > 100:
return True
# 检查缓存大小
cache_bytes = sum(len(data) for file_data in self.cache.values()
for _, data, _ in file_data)
if cache_bytes > self.cache_size * 0.8: # 使用超过80%
return True
return False
def flush_async(self):
"""异步刷回"""
# 在实际系统中,这里会启动一个后台线程
self.flush()
def flush(self):
"""刷回磁盘"""
for file_path, data_list in self.cache.items():
dirty_data = [(offset, data) for offset, data, dirty in data_list if dirty]
if dirty_data:
# 合并相邻的写操作
merged_writes = self.merge_writes(dirty_data)
# 执行写操作
for offset, data in merged_writes:
self.write_to_disk(file_path, offset, data)
# 标记为已清理
for i in range(len(data_list)):
if data_list[i][2]: # dirty
data_list[i] = (data_list[i][0], data_list[i][1], False)
self.dirty_pages = 0
self.last_flush = time.time()
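The prediction and write-back policies above can be poked at without a real disk. A sketch (file paths are arbitrary):
python
ra = ReadAheadBuffer()
ra.record_access_pattern('/var/log/syslog', 0, 4096)
ra.record_access_pattern('/var/log/syslog', 4096, 4096)
print(ra.predict_next_offset('/var/log/syslog', 8192, 4096))  # 12288: sequential, keep prefetching
print(ra.determine_prefetch_size('/var/log/syslog'))          # sized from recent request sizes

wb = WriteBackCache(flush_interval=5)
wb.add_to_cache('/tmp/out.bin', 0, b'A' * 4096)
wb.add_to_cache('/tmp/out.bin', 8192, b'B' * 4096)            # non-adjacent, kept as a separate entry
print(wb.should_flush())   # False until the time / dirty-page / size thresholds trip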
4. Virtualization
-
Container-based virtualization
python
import os
import shlex
import time

class ContainerVirtualization:
"""容器虚拟化"""
    def __init__(self):
self.namespaces = {
'pid': PIDNamespace(),
'net': NetworkNamespace(),
'mnt': MountNamespace(),
'uts': UTSNamespace(),
'ipc': IPCNamespace(),
'user': UserNamespace(),
        }
        self.cgroups = CGroupManager()
        self.images = ContainerImageManager()
        self.running_containers = {}

    def create_container(self, image_name, name=None, command=None):
"""创建容器"""
# 加载镜像
image = self.images.load(image_name)
if not image:
            raise ValueError(f"镜像不存在: {image_name}")
        # 创建容器ID
        container_id = self.generate_container_id()
        if name is None:
            name = container_id[:12]
        # 创建命名空间
        namespaces = {}
        for ns_type, ns_manager in self.namespaces.items():
            namespaces[ns_type] = ns_manager.create_namespace()
        # 创建cgroup
        cgroup_path = f"/sys/fs/cgroup/container/{container_id}"
        self.cgroups.create_cgroup(cgroup_path)
        # 设置资源限制
        self.cgroups.set_limit(cgroup_path, 'cpu', '100000')   # 100ms CPU时间
        self.cgroups.set_limit(cgroup_path, 'memory', '100M')  # 100MB内存
        self.cgroups.set_limit(cgroup_path, 'pids', '100')     # 最多100个进程
        # 创建容器结构
        container = {
            'id': container_id,
            'name': name,
            'image': image,
            'namespaces': namespaces,
            'cgroup': cgroup_path,
            'state': 'created',
            'pid': None,
            'command': command or image['command'],
            'created_at': time.time(),
            'started_at': None,
            'exited_at': None,
            'exit_code': None,
        }
        self.running_containers[container_id] = container
        return container_id

    def start_container(self, container_id):
"""启动容器"""
if container_id not in self.running_containers:
            raise ValueError(f"容器不存在: {container_id}")
        container = self.running_containers[container_id]
        # 在单独的进程中启动容器
        pid = os.fork()
        if pid == 0:
            # 子进程(容器进程)
            try:
                # 设置命名空间
                for ns_type, ns_id in container['namespaces'].items():
                    self.namespaces[ns_type].enter_namespace(ns_id)
                # 加入cgroup
                self.cgroups.join_cgroup(container['cgroup'], os.getpid())
                # 设置根文件系统
                self.setup_rootfs(container['image'])
                # 设置主机名
                self.setup_hostname(container['name'])
                # 设置网络
                self.setup_network(container)
                # 执行容器命令
                command = container['command']
                if isinstance(command, str):
                    args = shlex.split(command)
                else:
                    args = command
                os.execvp(args[0], args)
            except Exception as e:
                print(f"容器启动失败: {e}")
                os._exit(1)
        else:
            # 父进程
            container['pid'] = pid
            container['state'] = 'running'
            container['started_at'] = time.time()
            return pid

    def setup_rootfs(self, image):
"""设置根文件系统"""
# 创建临时目录作为根文件系统
rootfs = f"/tmp/container_{os.getpid()}"
        os.makedirs(rootfs, exist_ok=True)
        # 解压镜像层
        for layer in image['layers']:
            self.extract_layer(layer, rootfs)
        # 挂载proc, sysfs等
        os.system(f"mount -t proc proc {rootfs}/proc")
        os.system(f"mount -t sysfs sysfs {rootfs}/sys")
        os.system(f"mount -t tmpfs tmpfs {rootfs}/tmp")
        # 改变根目录
        os.chroot(rootfs)
        os.chdir('/')

    def extract_layer(self, layer_path, target_dir):
"""解压镜像层"""
# 在实际实现中,这里会解压tar或类似格式
# 这里简化处理
pass
class PIDNamespace:
"""PID命名空间"""
    def __init__(self):
self.namespaces = {} # namespace_id -> {pid: info}
self.next_ns_id = 1
def create_namespace(self):
"""创建PID命名空间"""
ns_id = self.next_ns_id
self.next_ns_id += 1
self.namespaces[ns_id] = {
'next_pid': 1, # 容器内PID从1开始
'pid_map': {}, # 主机PID -> 容器PID
'processes': {} # 容器PID -> 进程信息
}
return ns_id
def enter_namespace(self, ns_id):
"""进入PID命名空间"""
if ns_id not in self.namespaces:
raise ValueError(f"命名空间不存在: {ns_id}")
# 在真实Linux中,这里会调用unshare或setns系统调用
# 这里简化处理
# 记录当前进程
host_pid = os.getpid()
namespace = self.namespaces[ns_id]
container_pid = namespace['next_pid']
namespace['next_pid'] += 1
namespace['pid_map'][host_pid] = container_pid
namespace['processes'][container_pid] = {
'host_pid': host_pid,
'container_pid': container_pid,
'state': 'running'
}
def translate_pid(self, ns_id, pid):
"""在主机PID和容器PID之间转换"""
if ns_id not in self.namespaces:
return None
namespace = self.namespaces[ns_id]
# 容器PID -> 主机PID
for container_pid, info in namespace['processes'].items():
if container_pid == pid:
return info['host_pid']
# 主机PID -> 容器PID
return namespace['pid_map'].get(pid)
class CGroupManager:
"""cgroup管理器"""
    def __init__(self):
self.cgroup_root = '/sys/fs/cgroup'
self.subsystems = ['cpu', 'memory', 'pids', 'blkio', 'devices']
def create_cgroup(self, path):
"""创建cgroup"""
os.makedirs(path, exist_ok=True)
# 为每个子系统创建目录
for subsystem in self.subsystems:
subsystem_path = os.path.join(path, subsystem)
os.makedirs(subsystem_path, exist_ok=True)
def set_limit(self, cgroup_path, subsystem, limit):
"""设置资源限制"""
control_file = os.path.join(cgroup_path, subsystem, f"{subsystem}.max")
with open(control_file, 'w') as f:
f.write(limit)
def join_cgroup(self, cgroup_path, pid):
"""将进程加入cgroup"""
for subsystem in self.subsystems:
tasks_file = os.path.join(cgroup_path, subsystem, 'tasks')
with open(tasks_file, 'w') as f:
f.write(str(pid))
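The namespace bookkeeping can be exercised on its own, without fork/chroot or root privileges. A sketch:
python
pidns = PIDNamespace()
ns = pidns.create_namespace()
pidns.enter_namespace(ns)              # the current process is recorded as PID 1 inside ns
print(pidns.translate_pid(ns, 1))      # -> the host PID backing the container "init"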
-
System call interception and virtualization
python
import os

class SystemCallInterceptor:
"""系统调用拦截器(用于容器/虚拟化)"""
    def __init__(self):
self.intercepted_calls = {}
self.virtual_fs = VirtualFileSystem()
        self.virtual_network = VirtualNetwork()

    def intercept_syscall(self, syscall_number, handler):
"""拦截系统调用"""
        self.intercepted_calls[syscall_number] = handler

    def handle_intercepted(self, syscall_number, args):
"""处理被拦截的系统调用"""
if syscall_number in self.intercepted_calls:
handler = self.intercepted_calls[syscall_number]
            return handler(args)
        # 默认行为:执行原系统调用
        return self.execute_original_syscall(syscall_number, *args)

    def setup_container_interceptions(self):
"""设置容器需要的系统调用拦截"""
# 拦截文件系统相关调用
self.intercept_syscall(5, self.virtual_open) # open
self.intercept_syscall(6, self.virtual_close) # close
        self.intercept_syscall(19, self.virtual_lseek)    # lseek
        # 拦截网络相关调用
        self.intercept_syscall(102, self.virtual_socket)  # socket
        self.intercept_syscall(49, self.virtual_bind)     # bind
        self.intercept_syscall(50, self.virtual_listen)   # listen
        # 拦截进程相关调用
        self.intercept_syscall(20, self.virtual_getpid)   # getpid
        self.intercept_syscall(39, self.virtual_getppid)  # getppid
        # 拦截系统信息调用
        self.intercept_syscall(63, self.virtual_uname)    # uname

    def virtual_open(self, pathname, flags, mode):
"""虚拟化的open系统调用"""
# 检查路径是否需要重定向
        virtual_path = self.virtual_fs.map_path(pathname)
        # 如果是容器内的路径,重定向到宿主机的对应位置
        if virtual_path != pathname:
            # 执行重定向后的open
            return self.execute_original_syscall(5, virtual_path, flags, mode)
        # 否则正常执行
        return self.execute_original_syscall(5, pathname, flags, mode)

    def virtual_getpid(self):
"""虚拟化的getpid系统调用"""
# 返回容器内的PID,而不是主机PID
container_pid = self.get_container_pid(os.getpid())
        return container_pid if container_pid else os.getpid()

    def virtual_uname(self, buf):
"""虚拟化的uname系统调用"""
# 修改系统信息,使其看起来像在容器内
        utsname = self.execute_original_syscall(63, buf)
        # 修改主机名和域名
        container_hostname = self.get_container_hostname()
        if container_hostname:
            utsname.nodename = container_hostname.encode()
        return utsname
class VirtualFileSystem:
"""虚拟文件系统(用于容器)"""
    def __init__(self):
self.mount_points = {
'/': '/var/lib/container/rootfs',
'/proc': 'proc',
'/sys': 'sysfs',
'/tmp': 'tmpfs',
}
self.bind_mounts = {}
def map_path(self, path):
"""映射容器内路径到主机路径"""
# 找到最长的匹配挂载点
best_match = ''
best_target = ''
for mount_point, target in self.mount_points.items():
if path.startswith(mount_point):
if len(mount_point) > len(best_match):
best_match = mount_point
best_target = target
if not best_match:
return path
# 如果是特殊文件系统
if best_target in ['proc', 'sysfs', 'tmpfs']:
return path # 不重写,使用容器内的虚拟文件系统
# 计算重写后的路径
relative_path = path[len(best_match):]
if not relative_path.startswith('/'):
relative_path = '/' + relative_path
return best_target + relative_path
def add_bind_mount(self, source, target):
"""添加绑定挂载"""
self.bind_mounts[target] = source
def setup_container_rootfs(self, container_id, image_layers):
"""设置容器根文件系统"""
rootfs_path = f"/var/lib/container/{container_id}/rootfs"
os.makedirs(rootfs_path, exist_ok=True)
# 创建必要的目录
for dirname in ['bin', 'dev', 'etc', 'home', 'lib', 'proc',
'sys', 'tmp', 'usr', 'var']:
os.makedirs(os.path.join(rootfs_path, dirname), exist_ok=True)
# 解压镜像层
for layer in image_layers:
self.extract_layer(layer, rootfs_path)
# 设置挂载点
self.mount_points['/'] = rootfs_path
return rootfs_path
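Path translation is the heart of this virtual filesystem; a couple of calls show the longest-prefix mount lookup and the special-cased virtual filesystems:
python
vfs = VirtualFileSystem()
print(vfs.map_path('/etc/hosts'))      # /var/lib/container/rootfs/etc/hosts
print(vfs.map_path('/proc/cpuinfo'))   # /proc/cpuinfo (proc is not rewritten)
vfs.add_bind_mount('/data/models', '/mnt/models')   # hypothetical host path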
5. Capstone Lab: Building a Microkernel
python
import time

class Microkernel:
"""微内核架构操作系统核心"""
    def __init__(self):
# 核心服务(在微内核中运行在内核空间)
self.task_manager = TaskManager()
self.inter_process_comm = IPCManager()
self.memory_manager = MemoryManager()
self.interrupt_manager = InterruptManager()
# 服务器(运行在用户空间)
self.servers = {
'fs_server': FileSystemServer(),
'net_server': NetworkServer(),
'gui_server': GUIServer(),
'device_server': DeviceServer(),
}
# 消息传递系统
self.message_queue = MessageQueue()
# 能力系统
self.capability_system = CapabilitySystem()
def initialize(self):
"""初始化微内核"""
print("初始化微内核...")
# 1. 初始化硬件抽象
self.interrupt_manager.setup()
self.memory_manager.setup()
# 2. 启动核心服务
self.task_manager.start()
self.inter_process_comm.start()
# 3. 启动服务器
for server_name, server in self.servers.items():
pid = self.task_manager.create_task(
server.main,
name=f"server_{server_name}",
priority=10
)
server.pid = pid
print("微内核初始化完成")
def handle_syscall(self, syscall_number, args):
"""处理系统调用"""
# 微内核只提供最小的系统调用集
syscalls = {
1: self.sys_send_message, # 发送消息
2: self.sys_receive_message, # 接收消息
3: self.sys_create_task, # 创建任务
4: self.sys_destroy_task, # 销毁任务
5: self.sys_grant_capability, # 授予能力
6: self.sys_revoke_capability, # 撤销能力
7: self.sys_map_memory, # 映射内存
8: self.sys_unmap_memory, # 取消映射内存
}
if syscall_number in syscalls:
return syscalls[syscall_number](*args)
else:
return -1 # 未实现的系统调用
def sys_send_message(self, dest_pid, message, timeout=0):
"""发送消息到另一个进程"""
# 验证能力
if not self.capability_system.check_capability(
self.task_manager.current_pid, 'CAP_IPC'):
return -1
# 将消息放入队列
success = self.message_queue.send(
self.task_manager.current_pid,
dest_pid,
message,
timeout
)
return 0 if success else -1
def sys_receive_message(self, source_pid, buffer, timeout=0):
"""从消息队列接收消息"""
# 验证能力
if not self.capability_system.check_capability(
self.task_manager.current_pid, 'CAP_IPC'):
return -1
# 从队列获取消息
message = self.message_queue.receive(
source_pid,
self.task_manager.current_pid,
timeout
)
if message:
# 复制消息到用户缓冲区
self.copy_to_user(buffer, message)
return len(message)
else:
return 0 # 超时或无消息
def sys_create_task(self, entry_point, priority, capabilities):
"""创建新任务"""
# 验证能力
if not self.capability_system.check_capability(
self.task_manager.current_pid, 'CAP_TASK_MANAGEMENT'):
return -1
# 创建任务
pid = self.task_manager.create_task(
entry_point,
f"task_{self.task_manager.next_pid}",
priority
)
# 授予能力
for cap in capabilities:
self.capability_system.grant_capability(pid, cap)
return pid
def sys_map_memory(self, virtual_address, physical_address, size, flags):
"""映射内存"""
# 验证能力
if not self.capability_system.check_capability(
self.task_manager.current_pid, 'CAP_MEMORY_MANAGEMENT'):
return -1
# 执行映射
success = self.memory_manager.map(
self.task_manager.current_pid,
virtual_address,
physical_address,
size,
flags
)
return 0 if success else -1
class FileSystemServer:
"""文件系统服务器(用户空间)"""
    def __init__(self):
self.pid = None
self.filesystems = {}
self.open_files = {}
self.next_fd = 1000 # 从1000开始,避免与标准流冲突
def main(self):
"""服务器主循环"""
print(f"文件系统服务器启动 (PID: {self.pid})")
while True:
# 等待消息
message = self.receive_message(0) # 接收任何来源的消息
if message:
self.handle_message(message)
def handle_message(self, message):
"""处理消息"""
handler = getattr(self, f"handle_{message['type']}", None)
if handler:
response = handler(message)
self.send_response(message['from'], response)
def handle_open(self, message):
"""处理打开文件请求"""
path = message['path']
flags = message['flags']
mode = message.get('mode', 0o666)
# 查找文件系统
fs = self.find_filesystem_for_path(path)
if not fs:
return {'error': 'ENOENT', 'fd': -1}
# 打开文件
try:
file_obj = fs.open(path, flags, mode)
fd = self.next_fd
self.next_fd += 1
self.open_files[fd] = {
'file': file_obj,
'path': path,
'fs': fs
}
return {'fd': fd, 'error': None}
except PermissionError:
return {'error': 'EACCES', 'fd': -1}
except FileNotFoundError:
return {'error': 'ENOENT', 'fd': -1}
except Exception as e:
return {'error': 'EIO', 'fd': -1}
def handle_read(self, message):
"""处理读取文件请求"""
fd = message['fd']
size = message['size']
if fd not in self.open_files:
return {'error': 'EBADF', 'data': b'', 'bytes_read': 0}
file_info = self.open_files[fd]
try:
data = file_info['file'].read(size)
return {
'error': None,
'data': data,
'bytes_read': len(data)
}
except Exception as e:
return {'error': 'EIO', 'data': b'', 'bytes_read': 0}
def handle_write(self, message):
"""处理写入文件请求"""
fd = message['fd']
data = message['data']
if fd not in self.open_files:
return {'error': 'EBADF', 'bytes_written': 0}
file_info = self.open_files[fd]
try:
bytes_written = file_info['file'].write(data)
return {
'error': None,
'bytes_written': bytes_written
}
except PermissionError:
return {'error': 'EACCES', 'bytes_written': 0}
except Exception as e:
return {'error': 'EIO', 'bytes_written': 0}
def find_filesystem_for_path(self, path):
"""查找路径对应的文件系统"""
# 查找最长的匹配挂载点
best_match = ''
best_fs = None
for mount_point, fs in self.filesystems.items():
if path.startswith(mount_point):
if len(mount_point) > len(best_match):
best_match = mount_point
best_fs = fs
return best_fs
class MessageQueue:
"""消息队列"""
    def __init__(self):
self.queues = {} # pid -> [message]
self.locks = {}
def send(self, sender_pid, receiver_pid, message, timeout=0):
"""发送消息"""
if receiver_pid not in self.queues:
self.queues[receiver_pid] = []
# 添加发送者信息
message['from'] = sender_pid
message['timestamp'] = time.time()
# 添加到接收者队列
self.queues[receiver_pid].append(message)
return True
def receive(self, sender_pid, receiver_pid, timeout=0):
"""接收消息"""
if receiver_pid not in self.queues:
return None
queue = self.queues[receiver_pid]
# 等待消息
start_time = time.time()
while True:
# 查找匹配的消息
for i, message in enumerate(queue):
if sender_pid == 0 or message['from'] == sender_pid:
# 找到匹配的消息
return queue.pop(i)
# 检查超时
if timeout > 0 and time.time() - start_time > timeout:
return None
# 短暂休眠后重试
time.sleep(0.001)
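Everything in the microkernel rides on this message queue, so a minimal IPC round-trip makes a useful smoke test (PIDs and message fields are arbitrary):
python
mq = MessageQueue()
mq.send(100, 200, {'type': 'open', 'path': '/etc/hosts', 'flags': 0})
msg = mq.receive(0, 200, timeout=1)    # source 0 means "accept any sender"
print(msg['from'], msg['type'])        # 100 open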