This article is for reference and learning only.

1. LSTM prediction

First, scrape the data.

The script below scrapes the Da Le Tou (大乐透) draw history from 500.com, starting with the first draw of 2007 (start=07001) and running through the latest one.

import requests
from bs4 import BeautifulSoup
import csv

# Target URL: Da Le Tou draw history starting from draw 07001
url = 'http://datachart.500.com/dlt/history/newinc/history.php?start=07001'

# Send the HTTP request
response = requests.get(url)
response.encoding = 'utf-8'  # make sure the encoding is correct

# Parse the HTML
soup = BeautifulSoup(response.text, 'html.parser')

# Locate the table body that holds the draw data
tbody = soup.find('tbody', id="tdata")

# List that will hold the draw data
lottery_data = []

# Iterate over every row
for tr in tbody.find_all('tr'):
    tds = tr.find_all('td')
    if tds:
        # Extract the cell text and append it to the list
        lottery_data.append([td.text for td in tds])

# Write to a CSV file
with open('dlt_lottery_data.csv', 'w', newline='', encoding='utf-8') as csvfile:
    writer = csv.writer(csvfile)
    # Header row (optional)
    # writer.writerow(['期号', '号码1', '号码2', '号码3', '号码4', '号码5', '号码6', '号码7'])
    # Data rows
    writer.writerows(lottery_data)

print('Scraping finished; data saved to dlt_lottery_data.csv.')
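
A quick sanity check, not part of the original post, assuming the CSV was written by the script above; it prints the first saved row so the column layout can be inspected:

import csv

# Peek at the first scraped row: the draw number should come first,
# followed by the winning numbers and the remaining table columns.
with open('dlt_lottery_data.csv', newline='', encoding='utf-8') as f:
    first_row = next(csv.reader(f))
print(len(first_row), first_row[:8])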

The following scrapes the Shuangseqiu (双色球) history:

import requests
from bs4 import BeautifulSoup
import csv

# Target URL: Shuangseqiu draw history starting from draw 07001
url = 'http://datachart.500.com/ssq/history/newinc/history.php?start=07001'

# Send the HTTP request
response = requests.get(url)
response.encoding = 'utf-8'  # make sure the encoding is correct

# Parse the HTML
soup = BeautifulSoup(response.text, 'html.parser')

# Locate the table body that holds the draw data
tbody = soup.find('tbody', id="tdata")

# List that will hold the draw data
lottery_data = []

# Iterate over every row
for tr in tbody.find_all('tr'):
    tds = tr.find_all('td')
    if tds:
        # Extract the cell text and append it to the list
        lottery_data.append([td.text for td in tds])

# Write to a CSV file
with open('ssq_lottery_data.csv', 'w', newline='', encoding='utf-8') as csvfile:
    writer = csv.writer(csvfile)
    # Header row (optional)
    # writer.writerow(['期号', '号码1', '号码2', '号码3', '号码4', '号码5', '号码6', '号码7'])
    # Data rows
    writer.writerows(lottery_data)

print('Scraping finished; data saved to ssq_lottery_data.csv.')

Process the scraped data.

Da Le Tou is a 5+2 game (5 front-area numbers from 1-35 plus 2 back-area numbers from 1-12), while Shuangseqiu is 6+1 (6 red numbers from 1-33 plus 1 blue number from 1-16). The two layouts differ, so be careful to keep them apart; the sketch below spells out the column mapping assumed in this post.
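
A minimal sketch of the assumed CSV column layout (on the 500.com history pages the first column is the draw number, so the winning numbers start at index 1); the slice names are mine, used only for illustration:

# Assumed column mapping for the scraped CSV rows (draw number at index 0).
DLT_RED, DLT_BLUE = slice(1, 6), slice(6, 8)   # Da Le Tou: 5 front + 2 back numbers
SSQ_RED, SSQ_BLUE = slice(1, 7), slice(7, 8)   # Shuangseqiu: 6 red + 1 blue number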

The Da Le Tou version:

import csv

import pandas as pd


def get_data(path):
    r_data = []
    b_data = []
    with open(path, 'r') as file:
        reader = csv.reader(file)
        for row in reader:
            # Da Le Tou is 5+2: columns 1-5 are the front-area numbers, columns 6-7 the back-area numbers
            r_data.append(list(map(int, row[1:6])))
            b_data.append(list(map(int, row[6:8])))
    r_data.reverse()
    b_data.reverse()
    return r_data, b_data


def process_data():
    p = r"./dlt_lottery_data.csv"
    r_data, b_data = get_data(p)
    # print(b_data)
    return r_data, b_data


if __name__ == '__main__':
    process_data()

And the Shuangseqiu version:

import csv

import pandas as pd


def get_data(path):
    r_data = []
    b_data = []
    with open(path, 'r') as file:
        reader = csv.reader(file)
        for row in reader:
            # Shuangseqiu is 6+1: columns 1-6 are the red numbers, column 7 the blue number
            r_data.append(list(map(int, row[1:7])))
            b_data.append(list(map(int, row[7:8])))
    r_data.reverse()
    b_data.reverse()
    return r_data, b_data


def process_data():
    p = r"./ssq_lottery_data.csv"
    r_data, b_data = get_data(p)
    # print(b_data)
    return r_data, b_data


if __name__ == '__main__':
    process_data()

Now define the model.

import torch
import torch.nn as nn


# Define the LSTM model
class LSTMModel(nn.Module):
    def __init__(self, input_size, hidden_size, output_size, num_layers=1):
        super(LSTMModel, self).__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.lstm = nn.LSTM(input_size, hidden_size, num_layers, batch_first=True)
        self.fc = nn.Linear(hidden_size, output_size)

    def forward(self, x):
        # Zero-initialized hidden and cell states, one per layer
        h0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size).to(x.device)
        c0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size).to(x.device)
        out, _ = self.lstm(x, (h0, c0))
        # Use only the output of the last time step
        out = self.fc(out[:, -1, :])
        return out
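
A minimal shape check (my addition, not from the original post): with batch_first=True the model expects input of shape (batch, seq_len, input_size) and returns (batch, output_size). The sizes below match the values used later in the post (hidden_size=20, num_layers=3, 5 front-area numbers):

import torch

model = LSTMModel(input_size=5, hidden_size=20, output_size=5, num_layers=3)
dummy = torch.randn(4, 10, 5)   # a batch of 4 sequences, 10 time steps, 5 features each
print(model(dummy).shape)       # torch.Size([4, 5])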

Before training, the data is standardized and converted to PyTorch tensors.

def trans_process_data(seq_length):
    r_data, b_data = process_data()
    # print(r_data)
    r_data = np.array(r_data)
    b_data = np.array(b_data)

    # Convert to PyTorch tensors
    r_data = torch.tensor(r_data, dtype=torch.float32)
    b_data = torch.tensor(b_data, dtype=torch.float32)

    # Standardize (per-column mean and std)
    r_mean = r_data.mean(dim=0)
    r_std = r_data.std(dim=0)
    r_data = (r_data - r_mean) / r_std

    b_mean = b_data.mean(dim=0)
    b_std = b_data.std(dim=0)
    b_data = (b_data - b_mean) / b_std

    # Build sliding windows: seq_length consecutive draws as input, the next draw as target
    r_train = []
    r_target = []
    b_train = []
    b_target = []
    for i in range(len(r_data) - seq_length):
        r_train.append(r_data[i:i + seq_length])
        r_target.append(r_data[i + seq_length])
    r_train = torch.stack(r_train)
    r_target = torch.stack(r_target)

    for i in range(len(b_data) - seq_length):
        b_train.append(b_data[i:i + seq_length])
        b_target.append(b_data[i + seq_length])
    b_train = torch.stack(b_train)
    b_target = torch.stack(b_target)
    # print(r_train)
    return r_data, b_data, r_train, r_target, b_train, b_target, r_mean, r_std, b_mean, b_std

The training function:

def start_train(input_size, hidden_size, output_size, num_layers, train_data, target_data, num_epochs=100):
    model = LSTMModel(input_size, hidden_size, output_size, num_layers)
    criterion = nn.MSELoss()
    optimizer = optim.Adam(model.parameters(), lr=0.05)
    # Train the model
    for epoch in range(num_epochs):
        model.train()
        optimizer.zero_grad()
        # Forward pass
        outputs = model(train_data)
        loss = criterion(outputs, target_data)
        # Backward pass and optimization
        loss.backward()
        optimizer.step()
        if (epoch + 1) % 10 == 0:
            print(f'Epoch [{epoch + 1}/{num_epochs}], Loss: {loss.item():.4f}')
        # Halfway through training, drop the learning rate from 0.05 to 0.01
        if epoch == int(num_epochs / 2):
            optimizer = optim.Adam(model.parameters(), lr=0.01)
    return model
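
Re-creating the Adam optimizer halfway through training works, but it also discards Adam's accumulated state. A sketch of an alternative using a learning-rate scheduler; the function name start_train_sched is mine, and it assumes the LSTMModel class defined above:

import torch.nn as nn
import torch.optim as optim

def start_train_sched(input_size, hidden_size, output_size, num_layers, train_data, target_data, num_epochs=100):
    # Same loop as start_train, but the halfway learning-rate drop (0.05 -> 0.01)
    # is handled by a scheduler instead of rebuilding the optimizer.
    model = LSTMModel(input_size, hidden_size, output_size, num_layers)
    criterion = nn.MSELoss()
    optimizer = optim.Adam(model.parameters(), lr=0.05)
    scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=[num_epochs // 2], gamma=0.2)
    for epoch in range(num_epochs):
        model.train()
        optimizer.zero_grad()
        loss = criterion(model(train_data), target_data)
        loss.backward()
        optimizer.step()
        scheduler.step()
        if (epoch + 1) % 10 == 0:
            print(f'Epoch [{epoch + 1}/{num_epochs}], Loss: {loss.item():.4f}')
    return model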

The prediction function:

def start_predicted(model, predicted_data):
    model.eval()
    with torch.no_grad():
        # Use the last seq_length time steps as input (add a batch dimension)
        test_input = predicted_data.unsqueeze(0)
        predicted = model(test_input)
        # print("Predicted:", predicted)
        return predicted

The red balls and the blue balls are trained and predicted separately; the function below runs both trainings and predictions.

def start_all_train(hidden_size, num_layers, num_epochs, seq_length):
    r_data, b_data, r_train, r_target, b_train, b_target, r_mean, r_std, b_mean, b_std = trans_process_data(seq_length)
    # print(r_mean, r_std)
    r_size = 5
    r_model = start_train(r_size, hidden_size, r_size, num_layers, r_train, r_target, num_epochs)
    predicted_data = r_data[-seq_length:]
    r_predicted = start_predicted(r_model, predicted_data)

    print("--------------------------bbbbb-------------------------------------------")
    b_size = 2
    b_model = start_train(b_size, hidden_size, b_size, num_layers, b_train, b_target, num_epochs)
    predicted_data = b_data[-seq_length:]
    b_predicted = start_predicted(b_model, predicted_data)

    print(r_predicted)
    print(b_predicted)
    # Undo the standardization to get back to the original number scale
    r_predicted = r_predicted * r_std + r_mean
    b_predicted = b_predicted * b_std + b_mean
    print(r_predicted)
    print(b_predicted)
    return r_predicted, b_predicted

Full code:

import os
import sys

BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)

from data_process import process_data
import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np


# Define the LSTM model
class LSTMModel(nn.Module):
    def __init__(self, input_size, hidden_size, output_size, num_layers=1):
        super(LSTMModel, self).__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.lstm = nn.LSTM(input_size, hidden_size, num_layers, batch_first=True)
        self.fc = nn.Linear(hidden_size, output_size)

    def forward(self, x):
        h0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size).to(x.device)
        c0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size).to(x.device)
        out, _ = self.lstm(x, (h0, c0))
        out = self.fc(out[:, -1, :])
        return out


def trans_process_data(seq_length):
    r_data, b_data = process_data()
    # print(r_data)
    r_data = np.array(r_data)
    b_data = np.array(b_data)
    # Convert to PyTorch tensors
    r_data = torch.tensor(r_data, dtype=torch.float32)
    b_data = torch.tensor(b_data, dtype=torch.float32)
    # Standardize
    r_mean = r_data.mean(dim=0)
    r_std = r_data.std(dim=0)
    r_data = (r_data - r_mean) / r_std
    b_mean = b_data.mean(dim=0)
    b_std = b_data.std(dim=0)
    b_data = (b_data - b_mean) / b_std

    r_train = []
    r_target = []
    b_train = []
    b_target = []
    for i in range(len(r_data) - seq_length):
        r_train.append(r_data[i:i + seq_length])
        r_target.append(r_data[i + seq_length])
    r_train = torch.stack(r_train)
    r_target = torch.stack(r_target)

    for i in range(len(b_data) - seq_length):
        b_train.append(b_data[i:i + seq_length])
        b_target.append(b_data[i + seq_length])
    b_train = torch.stack(b_train)
    b_target = torch.stack(b_target)
    # print(r_train)
    return r_data, b_data, r_train, r_target, b_train, b_target, r_mean, r_std, b_mean, b_std


def start_train(input_size, hidden_size, output_size, num_layers, train_data, target_data, num_epochs=100):
    model = LSTMModel(input_size, hidden_size, output_size, num_layers)
    criterion = nn.MSELoss()
    optimizer = optim.Adam(model.parameters(), lr=0.05)
    # Train the model
    for epoch in range(num_epochs):
        model.train()
        optimizer.zero_grad()
        # Forward pass
        outputs = model(train_data)
        loss = criterion(outputs, target_data)
        # Backward pass and optimization
        loss.backward()
        optimizer.step()
        if (epoch + 1) % 10 == 0:
            print(f'Epoch [{epoch + 1}/{num_epochs}], Loss: {loss.item():.4f}')
        if epoch == int(num_epochs / 2):
            optimizer = optim.Adam(model.parameters(), lr=0.01)
    return model


def start_predicted(model, predicted_data):
    model.eval()
    with torch.no_grad():
        test_input = predicted_data.unsqueeze(0)  # use the last seq_length time steps as input
        predicted = model(test_input)
        # print("Predicted:", predicted)
        return predicted


def start_all_train(hidden_size, num_layers, num_epochs, seq_length):
    r_data, b_data, r_train, r_target, b_train, b_target, r_mean, r_std, b_mean, b_std = trans_process_data(seq_length)
    # print(r_mean, r_std)
    r_size = 5
    r_model = start_train(r_size, hidden_size, r_size, num_layers, r_train, r_target, num_epochs)
    predicted_data = r_data[-seq_length:]
    r_predicted = start_predicted(r_model, predicted_data)

    print("--------------------------bbbbb-------------------------------------------")
    b_size = 2
    b_model = start_train(b_size, hidden_size, b_size, num_layers, b_train, b_target, num_epochs)
    predicted_data = b_data[-seq_length:]
    b_predicted = start_predicted(b_model, predicted_data)

    print(r_predicted)
    print(b_predicted)
    r_predicted = r_predicted * r_std + r_mean
    b_predicted = b_predicted * b_std + b_mean
    print(r_predicted)
    print(b_predicted)
    return r_predicted, b_predicted


if __name__ == '__main__':
    hidden_size = 20
    num_layers = 3
    num_epochs = 1000
    seq_length = 10

    r_predicted, b_predicted = start_all_train(hidden_size, num_layers, num_epochs, seq_length)
    # print(r_predicted)
    # print(b_predicted)
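
After undoing the standardization, the predictions are still floating-point tensors of shape (1, 5) and (1, 2), not valid tickets. A possible post-processing step, not part of the original post: round, clamp into the Da Le Tou number ranges, and drop duplicates (if rounding produces duplicates, the missing slots would still need to be filled by hand):

import torch

def to_ticket(r_predicted, b_predicted, r_max=35, b_max=12):
    # Round to the nearest integer and clamp into the valid number ranges.
    reds = torch.clamp(torch.round(r_predicted.squeeze()), 1, r_max).int().tolist()
    blues = torch.clamp(torch.round(b_predicted.squeeze()), 1, b_max).int().tolist()
    # Rounding can create duplicates; keep the unique values, sorted.
    return sorted(set(reds)), sorted(set(blues))

# reds, blues = to_ticket(r_predicted, b_predicted)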

2. Random prediction

Below is the random-pick prediction:

import random

import numpy as np
from collections import Counter

# Da Le Tou and Shuangseqiu use different ranges
r_len = 5
r_num = 35

b_len = 2
b_num = 12

# Shuangseqiu
# r_len = 6
# r_num = 33
#
# b_len = 1
# b_num = 16

number = 100000000

li = []
li_r = []
li_b = []
for i in range(number):
    # One random ticket per iteration, drawn without replacement
    r_li = random.sample(range(1, r_num + 1), r_len)
    b_li = random.sample(range(1, b_num + 1), b_len)
    li_r.extend(r_li)
    li_b.extend(b_li)
    print(i)

counter_li_r = Counter(li_r)
counter_li_b = Counter(li_b)

# Numbers drawn most often
most_common_li_r = counter_li_r.most_common(r_len)
most_common_li_b = counter_li_b.most_common(b_len)
most_common_li_r = list(map(lambda x: x[0], most_common_li_r))
most_common_li_b = list(map(lambda x: x[0], most_common_li_b))
most_common_li_r.sort()
most_common_li_b.sort()

li = most_common_li_r
li.extend(most_common_li_b)
print("most: ", li)

# Numbers drawn least often
most_least_li_r = counter_li_r.most_common()[-r_len-1:-1]
most_least_li_b = counter_li_b.most_common()[-b_len-1:-1]
most_least_li_r = list(map(lambda x: x[0], most_least_li_r))
most_least_li_b = list(map(lambda x: x[0], most_least_li_b))
most_least_li_r.sort()
most_least_li_b.sort()
li = most_least_li_r
li.extend(most_least_li_b)
print("least: ", li)

Good luck, and congratulations in advance on winning first prize!
