scp 命令详解#
scp(Secure Copy)是 Linux 系统中用于在本地和远程主机之间安全复制文件的命令,基于 SSH 协议进行加密传输。它是系统管理员和开发人员进行文件传输的常用工具,支持递归复制、保留文件属性等功能。
入门#
基本用法#
# 从远程主机复制文件到本地
scp user@remote_host:/remote/path/file.txt /local/path/
# 从本地复制文件到远程主机
scp /local/path/file.txt user@remote_host:/remote/path/
# 复制目录(递归)
scp -r /local/directory user@remote_host:/remote/path/
# 指定端口
scp -P 2222 file.txt user@remote_host:/remote/path/
常用选项#
| 选项 | 说明 |
|---|---|
| -P | 指定 SSH 端口 |
| -p | 保留文件属性(修改时间、访问时间、权限) |
| -r | 递归复制目录 |
| -v | 显示详细输出 |
| -q | 静默模式 |
| -C | 启用压缩 |
| -l | 限制带宽 |
| -i | 指定私钥文件 |
基本示例#
# 从远程服务器下载文件
scp user@server.example.com:/var/log/app.log /tmp/
# 上传文件到远程服务器
scp /tmp/data.txt user@server.example.com:/home/user/
# 递归复制目录
scp -r /local/project user@server.example.com:/home/user/
# 使用特定端口
scp -P 2222 file.txt user@server.example.com:/tmp/
中级#
文件属性保留#
# 保留文件属性(时间戳、权限)
scp -p file.txt user@remote_host:/remote/path/
# 递归复制并保留属性
scp -rp /local/directory user@remote_host:/remote/path/
# 查看文件属性变化
ls -l file.txt
scp -p file.txt user@remote_host:/remote/path/
ssh user@remote_host "ls -l /remote/path/file.txt"
带宽控制#
# 限制传输速度(Kbit/s)
scp -l 1000 large_file.zip user@remote_host:/remote/path/
# 限制传输速度为 10000 Kbit/s(约 10 Mbit/s;-l 的单位始终是 Kbit/s)
scp -l 10000 large_file.zip user@remote_host:/remote/path/
# 启用压缩传输
scp -C large_file.zip user@remote_host:/remote/path/
# 组合使用压缩和限速
scp -C -l 5000 large_file.zip user@remote_host:/remote/path/
多文件传输#
# 复制多个文件到远程主机
scp file1.txt file2.txt file3.txt user@remote_host:/remote/path/
# 使用通配符复制文件
scp *.txt user@remote_host:/remote/path/
# 从远程主机复制多个文件
scp user@remote_host:/remote/path/*.txt /local/path/
# 复制多个目录
scp -r dir1/ dir2/ user@remote_host:/remote/path/
高级#
高级选项#
# 使用指定密钥文件
scp -i ~/.ssh/id_rsa file.txt user@remote_host:/remote/path/
# 显示详细传输信息
scp -v file.txt user@remote_host:/remote/path/
# 静默模式
scp -q file.txt user@remote_host:/remote/path/
# 使用 SSH 配置文件
# 在 ~/.ssh/config 中配置:
# Host server1
# HostName server1.example.com
# User admin
# Port 2222
# IdentityFile ~/.ssh/id_rsa_server1
#
# scp file.txt server1:/remote/path/
# 通过跳板主机传输
scp -o ProxyJump=jump_host user@remote_host:/remote/path/file.txt /local/path/
断点续传#
#!/bin/bash
# SCP 断点续传脚本
SOURCE_FILE="large_file.zip"
REMOTE_HOST="user@server.example.com"
REMOTE_PATH="/remote/path/"
LOCAL_PATH="/local/path/"
# 使用 rsync 实现断点续传
rsync_partial_transfer() {
local source=$1
local destination=$2
rsync -avz --partial --progress "$source" "$destination"
}
# 检查并恢复传输
# Resume (or start) a transfer of $SOURCE_FILE.
# Arguments: $1 - local source path, $2 - remote destination (host:path).
# Behavior: if a (partial) copy already exists on the remote, continue it
# with rsync --partial; otherwise start a fresh scp transfer.
resume_transfer() {
  local source=$1
  local remote=$2
  echo "Checking existing file on remote..."
  # Size of the (possibly partial) remote copy; 0 when it does not exist.
  # NOTE(review): this probes $REMOTE_PATH$SOURCE_FILE from the script
  # globals rather than deriving the path from "$remote" — confirm callers
  # always pass matching values.
  # Declaration and assignment are split so the ssh exit status is not
  # masked by `local`.
  local remote_size
  remote_size=$(ssh "$REMOTE_HOST" "stat -c%s $REMOTE_PATH$SOURCE_FILE 2>/dev/null || echo 0")
  # Quoted + defaulted so an empty ssh result cannot turn the integer
  # test into a syntax error.
  if [ "${remote_size:-0}" -gt 0 ]; then
    echo "Partial file found (${remote_size} bytes), resuming..."
    rsync -avz --partial --progress "$source" "$remote"
  else
    echo "No partial file found, starting new transfer..."
    scp "$source" "$remote"
  fi
}
resume_transfer "$LOCAL_PATH$SOURCE_FILE" "$REMOTE_HOST:$REMOTE_PATH"
批量传输#
#!/bin/bash
# 批量文件传输脚本
REMOTE_HOST="user@server.example.com"
REMOTE_DIR="/remote/path/"
LOCAL_DIR="/local/path/"
# 批量上传文件
# Upload every file under $LOCAL_DIR whose name matches $1 (a find glob).
batch_upload() {
  local pattern=$1
  echo "Uploading files matching: $pattern"
  # NUL-delimited find/read survives spaces, tabs and newlines in file
  # names; the original unquoted $LOCAL_DIR plus a plain `read` split on
  # whitespace and interpreted backslashes.
  find "$LOCAL_DIR" -name "$pattern" -print0 | while IFS= read -r -d '' file; do
    echo "Uploading: $file"
    scp "$file" "$REMOTE_HOST:$REMOTE_DIR"
  done
}
# 批量下载文件
# Download every remote file under $REMOTE_DIR whose name matches $1.
batch_download() {
  local pattern=$1
  echo "Downloading files matching: $pattern"
  # The remote find's output is one path per line; file names containing
  # newlines cannot survive this text protocol — assumes sane remote names.
  # IFS= read -r preserves leading whitespace and literal backslashes
  # (the original bare `read` mangled both).
  ssh "$REMOTE_HOST" "find $REMOTE_DIR -name '$pattern'" | while IFS= read -r file; do
    # Split declaration from assignment so basename's status isn't masked.
    local filename
    filename=$(basename "$file")
    echo "Downloading: $filename"
    scp "$REMOTE_HOST:$file" "$LOCAL_DIR"
  done
}
# 同步目录
# One-way directory sync via rsync --delete.
# Arguments: $1 - source dir, $2 - target dir, $3 - "upload" or "download".
# Returns the rsync exit status, or 1 on an unknown direction.
sync_directories() {
  local src=$1
  local dst=$2
  local mode=$3
  if [ "$mode" = "upload" ]; then
    echo "Syncing local to remote..."
    rsync -avz --delete "$src" "$REMOTE_HOST:$dst"
  elif [ "$mode" = "download" ]; then
    echo "Syncing remote to local..."
    rsync -avz --delete "$REMOTE_HOST:$dst" "$src"
  else
    echo "Invalid direction: $mode"
    return 1
  fi
}
# 主函数
main() {
case "$1" in
upload)
batch_upload "$2"
;;
download)
batch_download "$2"
;;
sync)
sync_directories "$2" "$3" "$4"
;;
*)
echo "Usage: $0 {upload|download|sync}"
exit 1
;;
esac
}
main "$@"
大师#
自动化备份系统#
#!/bin/bash
# 自动化备份系统
CONFIG_FILE="backup_config.conf"
LOG_DIR="backup_logs"
BACKUP_DIR="backups"
mkdir -p $LOG_DIR $BACKUP_DIR
# 加载配置
# Load run-time settings by sourcing $CONFIG_FILE into the current shell.
# Aborts the whole script when the file is missing.
load_config() {
  if [ -f "$CONFIG_FILE" ]; then
    # Quoted so a config path containing spaces does not word-split.
    source "$CONFIG_FILE"
  else
    # Diagnostics belong on stderr.
    echo "Config file not found: $CONFIG_FILE" >&2
    exit 1
  fi
}
# 执行备份
# Run one timestamped rsync backup and prune old copies on success.
# Arguments: $1 - local source, $2 - remote host (user@host),
#            $3 - remote base dir, $4 - logical backup name.
# Returns: 0 on success, 1 when rsync failed.
perform_backup() {
local source=$1
local remote_host=$2
local remote_dir=$3
local backup_name=$4
# Timestamp makes each backup directory unique: <name>_YYYYmmdd_HHMMSS.
local timestamp=$(date +%Y%m%d_%H%M%S)
local log_file="$LOG_DIR/${backup_name}_${timestamp}.log"
echo "Starting backup: $backup_name"
echo "Source: $source"
echo "Destination: $remote_host:$remote_dir"
# Ensure the remote base directory exists before rsync targets it.
ssh $remote_host "mkdir -p $remote_dir"
# --delete mirrors the source exactly; the per-run log goes to $log_file.
rsync -avz --delete --progress \
--log-file="$log_file" \
"$source" \
"$remote_host:$remote_dir/${backup_name}_${timestamp}"
# $? here is the rsync exit status (the command directly above).
if [ $? -eq 0 ]; then
echo "✓ Backup completed successfully"
# Only prune old backups after a confirmed-good new one.
cleanup_old_backups "$remote_host" "$remote_dir" "$backup_name"
return 0
else
echo "✗ Backup failed"
return 1
fi
}
# 清理旧备份
# Delete all but the newest $BACKUP_KEEP_COUNT backups named
# "<backup_name>_*" inside remote dir $2 on host $1.
cleanup_old_backups() {
local remote_host=$1
local remote_dir=$2
local backup_name=$3
# Number of most-recent backups to keep; override via BACKUP_KEEP_COUNT.
local keep_count=${BACKUP_KEEP_COUNT:-7}
echo "Cleaning up old backups (keeping last $keep_count)..."
# `ls -t` lists newest first; `tail -n +N+1` keeps everything after the
# first $keep_count entries, which xargs then removes. $((keep_count + 1))
# expands locally before the string is sent over ssh.
# NOTE(review): parsing ls breaks on names containing spaces/newlines —
# assumes backup directory names stay shell-safe.
ssh $remote_host "cd $remote_dir && ls -t ${backup_name}_* | tail -n +$((keep_count + 1)) | xargs -r rm -rf"
}
# 验证备份
verify_backup() {
local remote_host=$1
local remote_dir=$2
local backup_name=$3
echo "Verifying backup: $backup_name"
local latest_backup=$(ssh $remote_host "ls -t $remote_dir/${backup_name}_* | head -1")
if [ -n "$latest_backup" ]; then
echo "Latest backup: $latest_backup"
# 检查备份大小
local backup_size=$(ssh $remote_host "du -sh $latest_backup | awk '{print \$1}'")
echo "Backup size: $backup_size"
# 检查文件数量
local file_count=$(ssh $remote_host "find $latest_backup -type f | wc -l")
echo "File count: $file_count"
return 0
else
echo "✗ No backup found"
return 1
fi
}
# 恢复备份
restore_backup() {
local remote_host=$1
local remote_dir=$2
local backup_name=$3
local restore_dir=$4
echo "Restoring backup: $backup_name"
local latest_backup=$(ssh $remote_host "ls -t $remote_dir/${backup_name}_* | head -1")
if [ -n "$latest_backup" ]; then
echo "Restoring from: $latest_backup"
rsync -avz --progress \
"$remote_host:$latest_backup/" \
"$restore_dir"
if [ $? -eq 0 ]; then
echo "✓ Restore completed successfully"
return 0
else
echo "✗ Restore failed"
return 1
fi
else
echo "✗ No backup found"
return 1
fi
}
# 主函数
main() {
load_config
case "$1" in
backup)
perform_backup "$2" "$3" "$4" "$5"
;;
verify)
verify_backup "$2" "$3" "$4"
;;
restore)
restore_backup "$2" "$3" "$4" "$5"
;;
*)
echo "Usage: $0 {backup|verify|restore}"
exit 1
;;
esac
}
main "$@"
分布式文件同步#
#!/bin/bash
# 分布式文件同步系统
CONFIG_FILE="sync_config.conf"
LOG_FILE="sync.log"
# 加载配置
load_config() {
if [ -f "$CONFIG_FILE" ]; then
source $CONFIG_FILE
else
echo "Config file not found: $CONFIG_FILE"
exit 1
fi
}
# 同步到多个服务器
sync_to_multiple_servers() {
local source_dir=$1
local remote_dir=$2
shift 2
local servers=("$@")
echo "Syncing to ${#servers[@]} servers..."
for server in "${servers[@]}"; do
echo "Syncing to $server..."
rsync -avz --delete --progress \
--log-file="$LOG_FILE" \
"$source_dir" \
"$server:$remote_dir"
if [ $? -eq 0 ]; then
echo "✓ Sync to $server completed"
else
echo "✗ Sync to $server failed"
fi
done
}
# 从多个服务器收集文件
collect_from_multiple_servers() {
local remote_dir=$1
local local_dir=$2
shift 2
local servers=("$@")
mkdir -p "$local_dir"
for server in "${servers[@]}"; do
echo "Collecting from $server..."
local server_dir="$local_dir/$(echo $server | tr '@.' '_')"
mkdir -p "$server_dir"
rsync -avz --progress \
--log-file="$LOG_FILE" \
"$server:$remote_dir/" \
"$server_dir"
if [ $? -eq 0 ]; then
echo "✓ Collection from $server completed"
else
echo "✗ Collection from $server failed"
fi
done
}
# 双向同步
bidirectional_sync() {
local local_dir=$1
local remote_host=$2
local remote_dir=$3
echo "Performing bidirectional sync..."
# 本地到远程
echo "Syncing local to remote..."
rsync -avz --update --progress \
"$local_dir/" \
"$remote_host:$remote_dir/"
# 远程到本地
echo "Syncing remote to local..."
rsync -avz --update --progress \
"$remote_host:$remote_dir/" \
"$local_dir/"
echo "✓ Bidirectional sync completed"
}
# 冲突检测
detect_conflicts() {
local local_dir=$1
local remote_host=$2
local remote_dir=$3
echo "Detecting conflicts..."
local conflicts_file="conflicts_$(date +%Y%m%d_%H%M%S).txt"
# 比较文件修改时间
find "$local_dir" -type f | while read local_file; do
local relative_path="${local_file#$local_dir/}"
local remote_file="$remote_host:$remote_dir/$relative_path"
local local_mtime=$(stat -c %Y "$local_file")
local remote_mtime=$(ssh $remote_host "stat -c %Y $remote_dir/$relative_path 2>/dev/null || echo 0")
if [ $local_mtime -ne $remote_mtime ] && [ $remote_mtime -ne 0 ]; then
echo "CONFLICT: $relative_path" >> $conflicts_file
fi
done
if [ -f "$conflicts_file" ] && [ -s "$conflicts_file" ]; then
echo "Conflicts detected:"
cat "$conflicts_file"
return 1
else
echo "No conflicts detected"
return 0
fi
}
# 主函数
main() {
load_config
case "$1" in
sync-multi)
sync_to_multiple_servers "$2" "$3" "${@:4}"
;;
collect)
collect_from_multiple_servers "$2" "$3" "${@:4}"
;;
bidirectional)
bidirectional_sync "$2" "$3" "$4"
;;
detect-conflicts)
detect_conflicts "$2" "$3" "$4"
;;
*)
echo "Usage: $0 {sync-multi|collect|bidirectional|detect-conflicts}"
exit 1
;;
esac
}
main "$@"
文件传输监控#
#!/bin/bash
# 文件传输监控系统
LOG_FILE="transfer_monitor.log"
ALERT_EMAIL="admin@example.com"
# 监控传输进度
# Watch a background transfer until it exits, logging progress every 10s
# and mailing $ALERT_EMAIL on failure.
# Arguments: $1 - PID of the transfer process (must be a child of this
#            shell for `wait` to work), $2 - source path, $3 - destination.
monitor_transfer() {
local transfer_pid=$1
local source=$2
local destination=$3
echo "Monitoring transfer (PID: $transfer_pid)..."
# kill -0 sends no signal; it only tests whether the process still exists.
while kill -0 $transfer_pid 2>/dev/null; do
local timestamp=$(date '+%Y-%m-%d %H:%M:%S')
# NOTE(review): this sizes the *source* tree, not bytes actually sent —
# it is a liveness heartbeat, not a true progress figure.
local transferred_size=$(du -sh "$source" 2>/dev/null | awk '{print $1}')
echo "$timestamp - Transferred: $transferred_size" >> $LOG_FILE
sleep 10
done
# Reap the child and recover its exit status (works even after the loop
# saw it disappear, as long as it was started by this shell).
wait $transfer_pid
local exit_code=$?
if [ $exit_code -eq 0 ]; then
echo "✓ Transfer completed successfully"
echo "$(date '+%Y-%m-%d %H:%M:%S') - Transfer completed successfully" >> $LOG_FILE
else
echo "✗ Transfer failed with exit code: $exit_code"
echo "$(date '+%Y-%m-%d %H:%M:%S') - Transfer failed with exit code: $exit_code" >> $LOG_FILE
# Requires a working `mail` command (mailx/bsd-mailx) on this host.
echo "Transfer failed: $source -> $destination" | mail -s "Transfer Alert" $ALERT_EMAIL
fi
}
# 后台传输并监控
background_transfer_with_monitor() {
local source=$1
local destination=$2
echo "Starting background transfer: $source -> $destination"
# 启动后台传输
scp "$source" "$destination" &
local transfer_pid=$!
# 监控传输
monitor_transfer $transfer_pid "$source" "$destination"
}
# 批量传输监控
batch_transfer_monitor() {
local transfer_list=$1
while IFS='|' read -r source destination; do
if [ -n "$source" ] && [[ ! $source =~ ^# ]]; then
background_transfer_with_monitor "$source" "$destination"
fi
done < "$transfer_list"
}
# 生成传输报告
generate_transfer_report() {
local report_file="transfer_report_$(date +%Y%m%d).txt"
echo "Transfer Report - $(date +%Y-%m-%d)" > $report_file
echo "=========================" >> $report_file
echo "" >> $report_file
echo "Transfer Statistics:" >> $report_file
echo "Total transfers: $(grep "Transfer completed" $LOG_FILE | wc -l)" >> $report_file
echo "Failed transfers: $(grep "Transfer failed" $LOG_FILE | wc -l)" >> $report_file
echo "" >> $report_file
echo "Recent Transfers:" >> $report_file
tail -20 $LOG_FILE >> $report_file
echo "Report saved to: $report_file"
}
# 主函数
main() {
case "$1" in
monitor)
background_transfer_with_monitor "$2" "$3"
;;
batch)
batch_transfer_monitor "$2"
;;
report)
generate_transfer_report
;;
*)
echo "Usage: $0 {monitor|batch|report}"
exit 1
;;
esac
}
main "$@"
无敌#
企业级文件传输系统#
#!/bin/bash
# 企业级文件传输系统
CONFIG_DIR="/etc/transfer_system"
LOG_DIR="/var/log/transfer_system"
QUEUE_FILE="/var/run/transfer_system/queue"
STATUS_FILE="/var/run/transfer_system/status"
mkdir -p $CONFIG_DIR $LOG_DIR /var/run/transfer_system
# 初始化队列
init_queue() {
if [ ! -f "$QUEUE_FILE" ]; then
touch "$QUEUE_FILE"
fi
}
# 添加传输任务
# Append a new task line to the queue file.
# Queue record format: id|timestamp|source|destination|priority|options|status
# Arguments: $1 - source, $2 - destination, $3 - priority (numeric,
#            lower = sooner), $4 - extra scp options.
# Outputs the new task id on stdout; returns 0.
add_transfer_task() {
  local source=$1
  local destination=$2
  local priority=$3
  local options=$4
  local task_id
  task_id=$(date +%s%N)
  local timestamp
  timestamp=$(date '+%Y-%m-%d %H:%M:%S')
  echo "$task_id|$timestamp|$source|$destination|$priority|$options|pending" >> "$QUEUE_FILE"
  echo "Task added: $task_id"
  # The original `return $task_id` was a bug: return codes are 0-255, so a
  # nanosecond id overflowed into a meaningless (usually nonzero) status.
  # Callers read the id from stdout instead.
  return 0
}
# 获取下一个任务
# Print the highest-priority pending task line (lowest numeric value in
# field 5 wins). Returns 0 when a task was printed, 1 otherwise.
get_next_task() {
  [ -f "$QUEUE_FILE" ] || return 1
  local candidate
  candidate=$(sort -t'|' -k5 -n "$QUEUE_FILE" | grep "|pending$" | head -1)
  # Empty candidate -> status 1; otherwise echo it and return 0.
  [ -n "$candidate" ] && echo "$candidate"
}
# 更新任务状态
# Rewrite only the trailing status field of the task line whose id is $1.
# Queue record format: id|timestamp|source|destination|priority|options|status
update_task_status() {
  local task_id=$1
  local status=$2
  # Bug fix: the original sed searched for the NEW status in the pattern
  # (so a line still holding the old status never matched), and its
  # replacement collapsed source/destination/priority/options into empty
  # "|||" fields. Here \(...\) captures everything up to the last '|' and
  # only the final field is replaced.
  sed -i "s/^\($task_id|.*\)|[^|]*$/\1|$status/" "$QUEUE_FILE"
}
# 执行传输任务
# Run one queued transfer and record its outcome.
# Argument: $1 - a full queue record
#           (id|timestamp|source|destination|priority|options|status).
execute_task() {
local task=$1
# Pull individual fields out of the pipe-delimited record.
local task_id=$(echo "$task" | cut -d'|' -f1)
local source=$(echo "$task" | cut -d'|' -f3)
local destination=$(echo "$task" | cut -d'|' -f4)
local options=$(echo "$task" | cut -d'|' -f6)
local log_file="$LOG_DIR/task_${task_id}.log"
echo "Executing task: $task_id"
echo "Source: $source"
echo "Destination: $destination"
update_task_status "$task_id" "running"
# $options is deliberately unquoted so multiple scp flags word-split into
# separate arguments; the transfer's full output goes to the task log.
scp $options "$source" "$destination" > "$log_file" 2>&1
# $? is the scp exit status from the line directly above.
if [ $? -eq 0 ]; then
update_task_status "$task_id" "completed"
echo "✓ Task $task_id completed successfully"
else
update_task_status "$task_id" "failed"
echo "✗ Task $task_id failed"
fi
}
# 传输工作进程
# Worker loop: repeatedly claim the next pending task and execute it,
# sleeping 5s when the queue is empty. Runs forever (caller backgrounds it).
# Argument: $1 - numeric worker id, used only for logging.
transfer_worker() {
local worker_id=$1
echo "Worker $worker_id started"
while true; do
local task=$(get_next_task)
if [ -n "$task" ]; then
local task_id=$(echo "$task" | cut -d'|' -f1)
# Mark the task claimed. NOTE(review): there is no locking between
# get_next_task and this update, so two concurrent workers can claim
# the same task; execute_task also sets "running" again (redundant).
update_task_status "$task_id" "running"
execute_task "$task"
else
sleep 5
fi
done
}
# 启动多个工作进程
start_workers() {
local worker_count=$1
for ((i=1; i<=worker_count; i++)); do
transfer_worker $i &
done
wait
}
# 查看任务状态
view_task_status() {
local task_id=$1
if [ -z "$task_id" ]; then
echo "=== All Tasks ==="
cat "$QUEUE_FILE"
else
echo "=== Task $task_id ==="
grep "^$task_id|" "$QUEUE_FILE"
fi
}
# 取消任务
cancel_task() {
local task_id=$1
local task=$(grep "^$task_id|" "$QUEUE_FILE")
local status=$(echo "$task" | cut -d'|' -f7)
if [ "$status" = "pending" ]; then
update_task_status "$task_id" "cancelled"
echo "Task $task_id cancelled"
else
echo "Cannot cancel task with status: $status"
fi
}
# 生成统计报告
generate_statistics() {
echo "=== Transfer Statistics ==="
echo ""
local total=$(wc -l < "$QUEUE_FILE")
local pending=$(grep "|pending$" "$QUEUE_FILE" | wc -l)
local running=$(grep "|running$" "$QUEUE_FILE" | wc -l)
local completed=$(grep "|completed$" "$QUEUE_FILE" | wc -l)
local failed=$(grep "|failed$" "$QUEUE_FILE" | wc -l)
local cancelled=$(grep "|cancelled$" "$QUEUE_FILE" | wc -l)
echo "Total tasks: $total"
echo "Pending: $pending"
echo "Running: $running"
echo "Completed: $completed"
echo "Failed: $failed"
echo "Cancelled: $cancelled"
echo ""
echo "Success rate: $(echo "scale=2; $completed * 100 / ($completed + $failed)" | bc)%"
}
# 主函数
main() {
init_queue
case "$1" in
add)
add_transfer_task "$2" "$3" "$4" "$5"
;;
start)
start_workers "${2:-3}"
;;
status)
view_task_status "$2"
;;
cancel)
cancel_task "$2"
;;
stats)
generate_statistics
;;
*)
echo "Usage: $0 {add|start|status|cancel|stats}"
exit 1
;;
esac
}
main "$@"
智能文件传输调度器#
#!/bin/bash
# 智能文件传输调度器
CONFIG_FILE="transfer_schedule.conf"
LOG_FILE="transfer_scheduler.log"
# 加载配置
load_config() {
if [ -f "$CONFIG_FILE" ]; then
source $CONFIG_FILE
else
echo "Config file not found: $CONFIG_FILE"
exit 1
fi
}
# 检查网络状态
# Probe basic Internet reachability: a single ICMP echo with a 2-second
# timeout against a public resolver; ping's exit status becomes ours.
check_network_status() {
  local probe_host="8.8.8.8"
  ping -c 1 -W 2 "$probe_host" > /dev/null 2>&1
}
# 检查系统负载
check_system_load() {
local load=$(uptime | awk -F'load average:' '{print $2}' | awk '{print $1}' | cut -d',' -f1)
local max_load=${MAX_LOAD:-2.0}
if (( $(echo "$load < $max_load" | bc -l) )); then
return 0
else
return 1
fi
}
# 检查磁盘空间
# Succeed (return 0) when the filesystem containing $1 has more than
# MIN_DISK_SPACE bytes free (default 1 GiB); return 1 otherwise.
check_disk_space() {
  local path=$1
  local min_space=${MIN_DISK_SPACE:-1073741824} # 1GB
  local available_space
  # df column 4 is free space in 1K blocks; printf "%.0f" keeps the byte
  # count as a plain integer (awk's default `print` switches to scientific
  # notation past ~6 significant digits, which broke the `[ -gt ]` test
  # for any disk with more than ~1 GB free).
  available_space=$(df "$path" 2>/dev/null | awk 'NR==2 {printf "%.0f", $4 * 1024}')
  # Default to 0 when df failed so the integer test stays well-formed
  # (the original left the variable empty -> "integer expression expected").
  [ "${available_space:-0}" -gt "$min_space" ]
}
# 检查传输时间窗口
# Succeed only when the current hour lies inside [START_HOUR, END_HOUR)
# (defaults 0-6, i.e. the overnight transfer window).
check_time_window() {
  local hour
  hour=$(date +%H)
  local window_start=${START_HOUR:-0}
  local window_end=${END_HOUR:-6}
  # The compound test's status is the function's return value.
  [ "$hour" -ge "$window_start" ] && [ "$hour" -lt "$window_end" ]
}
# 智能调度传输
schedule_transfer() {
local source=$1
local destination=$2
local task_name=$3
echo "Scheduling transfer: $task_name"
# 检查网络状态
if ! check_network_status; then
echo "Network not available, postponing transfer"
return 1
fi
# 检查系统负载
if ! check_system_load; then
echo "System load too high, postponing transfer"
return 1
fi
# 检查磁盘空间
if ! check_disk_space "$source"; then
echo "Insufficient disk space, postponing transfer"
return 1
fi
# 检查时间窗口
if ! check_time_window; then
echo "Outside allowed time window, postponing transfer"
return 1
fi
# 执行传输
echo "Starting transfer: $task_name"
scp -C -l ${BANDWIDTH_LIMIT:-10000} "$source" "$destination" >> "$LOG_FILE" 2>&1
if [ $? -eq 0 ]; then
echo "✓ Transfer completed: $task_name"
return 0
else
echo "✗ Transfer failed: $task_name"
return 1
fi
}
# 主调度循环
main_scheduler() {
load_config
echo "Starting transfer scheduler..."
echo "Log file: $LOG_FILE"
while true; do
# 从配置文件读取传输任务
while IFS='|' read -r task_name source destination options; do
if [ -n "$task_name" ] && [[ ! $task_name =~ ^# ]]; then
schedule_transfer "$source" "$destination" "$task_name"
fi
done < "$CONFIG_FILE"
# 等待下次检查
sleep ${CHECK_INTERVAL:-300}
done
}
# 单次执行模式
single_run() {
load_config
while IFS='|' read -r task_name source destination options; do
if [ -n "$task_name" ] && [[ ! $task_name =~ ^# ]]; then
schedule_transfer "$source" "$destination" "$task_name"
fi
done < "$CONFIG_FILE"
}
# 主函数
main() {
case "$1" in
start)
main_scheduler
;;
run)
single_run
;;
*)
echo "Usage: $0 {start|run}"
exit 1
;;
esac
}
main "$@"
最佳实践#
- 使用 rsync 替代:对于大文件或频繁传输,优先使用 rsync
- 启用压缩:对于文本文件,使用 -C 选项启用压缩
- 限制带宽:在生产环境中使用 -l 选项限制带宽
- 保留文件属性:使用 -p 选项保留文件属性
- 使用 SSH 密钥:配置 SSH 密钥认证,避免密码输入
- 监控传输:对于重要传输,监控传输进度
- 验证传输:传输完成后验证文件完整性
- 使用配置文件:对于复杂的传输任务使用配置文件管理
注意事项#
- scp 传输大文件时可能需要较长时间
- 在生产环境中传输文件时要格外小心
- 使用通配符时要确保匹配正确的文件
- 注意文件权限和所有权的变化
- 传输敏感数据时注意保护隐私
- 在自动化脚本中添加适当的错误处理
- 不同版本的 scp 选项可能有所不同
- 对于大量小文件,考虑打包后传输
- 注意网络中断后的恢复策略